//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//

#include "llvm/MCA/InstrBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
                           const llvm::MCInstrInfo &mcii,
                           const llvm::MCRegisterInfo &mri,
                           const llvm::MCInstrAnalysis *mcia)
    : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
      FirstReturnInst(true) {
  const MCSchedModel &SM = STI.getSchedModel();
  ProcResourceMasks.resize(SM.getNumProcResourceKinds());
  computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
}
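
// Illustrative sketch: computeProcResourceMasks() assigns every processor
// resource a bitmask. Under a hypothetical model with two units P0 and P1,
// plus a group P01 containing both, the assignment could look like:
//   P0  -> 0b001
//   P1  -> 0b010
//   P01 -> 0b111   (a fresh leading bit, plus the bits of its member units)
// The group's "leading 1" is what PowerOf2Floor() strips further below.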

static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource
  // is part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
  DenseMap<uint64_t, unsigned> SuperResources;

  unsigned NumProcResources = SM.getNumProcResourceKinds();
  APInt Buffers(NumProcResources, 0);

  bool AllInOrderResources = true;
  bool AnyDispatchHazards = false;
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    if (PR.BufferSize < 0) {
      AllInOrderResources = false;
    } else {
      Buffers.setBit(PRE->ProcResourceIdx);
      AnyDispatchHazards |= (PR.BufferSize == 0);
      AllInOrderResources &= (PR.BufferSize <= 1);
    }

    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
    if (PR.SuperIdx) {
      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
      SuperResources[Super] += PRE->Cycles;
    }
  }

  ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
    unsigned popcntA = countPopulation(A.first);
    unsigned popcntB = countPopulation(B.first);
    if (popcntA < popcntB)
      return true;
    if (popcntA > popcntB)
      return false;
    return A.first < B.first;
  });
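
  // Worked example (reusing the hypothetical P0/P1/P01 masks from the sketch
  // above): popcount(P0) == popcount(P1) == 1 and popcount(P01) == 3, so the
  // sorted worklist is [P0, P1, P01]; resource units are always visited
  // before any group that contains them.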

  uint64_t UsedResourceUnits = 0;
  uint64_t UsedResourceGroups = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    if (!A.second.size()) {
      assert(countPopulation(A.first) > 1 && "Expected a group!");
      UsedResourceGroups |= PowerOf2Floor(A.first);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
    }

    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
        if (countPopulation(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }
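
  // Worked example (illustrative): for the Haswell SchedWriteRes shown below,
  // the expanded entries are roughly HWPort0:2cy, HWPort1:2cy, HWPort01:7cy,
  // since TableGen folds the 2+2 unit cycles into the group. Visiting HWPort0
  // and HWPort1 first subtracts 2cy each from HWPort01, leaving the 3cy that
  // the group consumes in its own right.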

  ID.UsedProcResUnits = UsedResourceUnits;
  ID.UsedProcResGroups = UsedResourceGroups;

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ResourceCycles = [2, 2, 3];
  //  }
  //
  // This means:
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
  // extra delay on top of the 2 cycles latency.
  // During those extra cycles, HWPort01 is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();
    }
  }
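
  // Illustrative mask arithmetic (hypothetical masks): for a group with
  // RPC.first == 0b111, Mask becomes 0b111 ^ 0b100 == 0b011; if both unit
  // bits are also set in UsedResourceUnits, the group is marked as reserved.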

  // Identify extra buffers that are consumed through super resources.
  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
      const MCProcResourceDesc &PR = *SM.getProcResource(I);
      if (PR.BufferSize == -1)
        continue;

      uint64_t Mask = ProcResourceMasks[I];
      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
        Buffers.setBit(I);
    }
  }

  // Now set the buffers.
  if (unsigned NumBuffers = Buffers.countPopulation()) {
    ID.Buffers.resize(NumBuffers);
    for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
      if (Buffers[I]) {
        --NumBuffers;
        ID.Buffers[NumBuffers] = ProcResourceMasks[I];
      }
    }
  }
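
  // Note that the loop above fills ID.Buffers back-to-front: NumBuffers is
  // decremented before each store, so it doubles as both the remaining count
  // and the destination index.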

  LLVM_DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tResource Mask=" << format_hex(R.first, 16) << ", "
             << "Reserved=" << R.second.isReserved() << ", "
             << "#Units=" << R.second.NumUnits << ", "
             << "cy=" << R.second.size() << '\n';
    for (const uint64_t R : ID.Buffers)
      dbgs() << "\t\tBuffer Mask=" << format_hex(R, 16) << '\n';
    dbgs() << "\t\t Used Units=" << format_hex(ID.UsedProcResUnits, 16) << '\n';
    dbgs() << "\t\tUsed Groups=" << format_hex(ID.UsedProcResGroups, 16)
           << '\n';
  });
}

static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
                              const MCSchedClassDesc &SCDesc,
                              const MCSubtargetInfo &STI) {
  if (MCDesc.isCall()) {
    // We cannot estimate how long this call will take.
    // Artificially set an arbitrarily high latency (100cy).
    ID.MaxLatency = 100U;
    return;
  }

  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
}
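
// Note: MCSchedModel::computeInstrLatency() returns the largest latency
// across the write latency entries of the scheduling class; a negative result
// denotes an unknown latency, which is clamped to the same 100cy default used
// for calls.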

static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
  // Count register definitions, and skip non-register operands in the process.
  unsigned I, E;
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
    const MCOperand &Op = MCI.getOperand(I);
    if (Op.isReg())
      --NumExplicitDefs;
  }

  if (NumExplicitDefs) {
    return make_error<InstructionError<MCInst>>(
        "Expected more register operand definitions.", MCI);
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand.
    const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
    if (I == MCI.getNumOperands() || !Op.isReg()) {
      std::string Message =
          "expected a register operand for an optional definition. Instruction "
          "has not been correctly analyzed.";
      return make_error<InstructionError<MCInst>>(Message, MCI);
    }
  }

  return ErrorSuccess();
}

void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
                                  unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);

  // Assumptions made by this algorithm:
  // 1. The number of explicit and implicit register definitions in an MCInst
  //    matches the number of explicit and implicit definitions according to
  //    the opcode descriptor (MCInstrDesc).
  // 2. Uses start at index #(MCDesc.getNumDefs()).
  // 3. There can only be a single optional register definition, and it is
  //    always the last operand of the sequence (excluding extra operands
  //    contributed by variadic opcodes).
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions are
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // About assumption 1.
  // The algorithm allows non-register operands between register operand
  // definitions. This helps to handle some special ARM instructions with
  // implicit operand increment (-mtriple=armv7):
  //
  // vld1.32 {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
  //                            @  <MCOperand Reg:59>
  //                            @  <MCOperand Imm:0>     (!!)
  //                            @  <MCOperand Reg:67>
  //                            @  <MCOperand Imm:0>
  //                            @  <MCOperand Imm:14>
  //                            @  <MCOperand Reg:0>>
  //
  // MCDesc reports:
  //  6 explicit operands.
  //  1 optional definition.
  //  2 explicit definitions (!!)
  //
  // The presence of an 'Imm' operand between the two register definitions
  // breaks the assumption that "register definitions are always at the
  // beginning of the operand sequence".
  //
  // To work around this issue, this algorithm ignores (i.e. skips) any
  // non-register operands between register definitions. The optional
  // definition is still at index #(NumOperands-1).
  //
  // According to assumption 2., register reads start at #(NumExplicitDefs-1).
  // That means register R1 from the example is both read and written.
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;

  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  ID.Writes.resize(TotalDefs + NumVariadicOps);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplicitDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    CurrentDef++;
  }

  assert(CurrentDef == NumExplicitDefs &&
         "Expected more register operand definitions.");
  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
             << ", PhysReg=" << MRI.getName(Write.RegisterID)
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }
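
  // Note that implicit definitions store the bitwise complement of their
  // index in OpIndex; a negative OpIndex is how isImplicitWrite() and
  // isImplicitRead() later distinguish implicit operands from explicit
  // MCInst operand indices.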

  if (MCDesc.hasOptionalDef()) {
    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
    Write.OpIndex = MCDesc.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (!NumVariadicOps)
    return;

  // FIXME: if an instruction opcode is flagged 'mayStore', and it has no
  // 'unmodeledSideEffects', then this logic optimistically assumes that any
  // extra register operands in the variadic sequence are not register
  // definitions.
  //
  // Otherwise, we conservatively assume that any register operand from the
  // variadic sequence is both a register read and a register write.
  bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = OpIndex;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = false;
    ++CurrentDef;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  ID.Writes.resize(CurrentDef);
}
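
// Note: after populateWrites() returns, ID.Writes is laid out as
// [explicit defs][implicit defs][optional def][variadic defs], and then
// trimmed to the number of entries that were actually populated.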

void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
                                 unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
  // Remove the optional definition.
  if (MCDesc.hasOptionalDef())
    --NumExplicitUses;
  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
  ID.Reads.resize(TotalUses);
  unsigned CurrentUse = 0;
  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
       ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  // For the purpose of ReadAdvance, implicit uses come directly after explicit
  // uses. The "UseIndex" must be updated according to that implicit layout.
  for (unsigned I = 0; I < NumImplicitUses; ++I) {
    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
    Read.OpIndex = ~I;
    Read.UseIndex = NumExplicitUses + I;
    Read.RegisterID = MCDesc.getImplicitUses()[I];
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
                      << MRI.getName(Read.RegisterID) << '\n');
  }

  CurrentUse += NumImplicitUses;

  // FIXME: If an instruction opcode is marked as 'mayLoad', and it has no
  // 'unmodeledSideEffects', then this logic optimistically assumes that any
  // extra register operands in the variadic sequence are register
  // definitions, and therefore not register reads.
  bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  ID.Reads.resize(CurrentUse);
}
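
// Note: ID.Reads mirrors that layout as [explicit uses][implicit uses]
// [variadic uses]. A load-multiple such as ARM's LDM illustrates the
// defs-only assumption above: its variadic operands name the loaded (i.e.
// written) registers, so no ReadDescriptor is created for them.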

Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
                                    const MCInst &MCI) const {
  if (ID.NumMicroOps != 0)
    return ErrorSuccess();

  bool UsesMemory = ID.MayLoad || ID.MayStore;
  bool UsesBuffers = !ID.Buffers.empty();
  bool UsesResources = !ID.Resources.empty();
  if (!UsesMemory && !UsesBuffers && !UsesResources)
    return ErrorSuccess();

  StringRef Message;
  if (UsesMemory) {
    Message = "found an inconsistent instruction that decodes "
              "into zero opcodes and that consumes load/store "
              "unit resources.";
  } else {
    Message = "found an inconsistent instruction that decodes "
              "to zero opcodes and that consumes scheduler "
              "resources.";
  }

  return make_error<InstructionError<MCInst>>(Message, MCI);
}

Expected<const InstrDesc &>
InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  // Obtain the instruction descriptor from the opcode.
  unsigned short Opcode = MCI.getOpcode();
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  unsigned SchedClassID = MCDesc.getSchedClass();
  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();

  // Try to solve variant scheduling classes.
  if (IsVariant) {
    unsigned CPUID = SM.getProcessorID();
    while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
      SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);

    if (!SchedClassID) {
      return make_error<InstructionError<MCInst>>(
          "unable to resolve scheduling class for write variant.", MCI);
    }
  }

  // Check if this instruction is supported. Otherwise, report an error.
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
    return make_error<InstructionError<MCInst>>(
        "found an unsupported instruction in the input assembly sequence.",
        MCI);
  }

  LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
  LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
  ID->NumMicroOps = SCDesc.NumMicroOps;
  ID->SchedClassID = SchedClassID;

  if (MCDesc.isCall() && FirstCallInst) {
    // We don't correctly model calls.
    WithColor::warning() << "found a call in the input assembly sequence.\n";
    WithColor::note() << "call instructions are not correctly modeled. "
                      << "Assume a latency of 100cy.\n";
    FirstCallInst = false;
  }

  if (MCDesc.isReturn() && FirstReturnInst) {
    WithColor::warning() << "found a return instruction in the input"
                         << " assembly sequence.\n";
    WithColor::note() << "program counter updates are ignored.\n";
    FirstReturnInst = false;
  }

  ID->MayLoad = MCDesc.mayLoad();
  ID->MayStore = MCDesc.mayStore();
  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
  ID->BeginGroup = SCDesc.BeginGroup;
  ID->EndGroup = SCDesc.EndGroup;

  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI);

  if (Error Err = verifyOperands(MCDesc, MCI))
    return std::move(Err);

  populateWrites(*ID, MCI, SchedClassID);
  populateReads(*ID, MCI, SchedClassID);

  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Sanity check on the instruction descriptor.
  if (Error Err = verifyInstrDesc(*ID, MCI))
    return std::move(Err);

  // Now add the new descriptor.
  SchedClassID = MCDesc.getSchedClass();
  bool IsVariadic = MCDesc.isVariadic();
  if (!IsVariadic && !IsVariant) {
    Descriptors[MCI.getOpcode()] = std::move(ID);
    return *Descriptors[MCI.getOpcode()];
  }

  VariantDescriptors[&MCI] = std::move(ID);
  return *VariantDescriptors[&MCI];
}
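
// Note: descriptors for ordinary opcodes are cached in Descriptors, keyed by
// opcode, so the analysis above runs once per opcode. Variadic and variant
// instructions instead get a per-MCInst entry in VariantDescriptors, since
// their descriptors may differ between instances of the same opcode.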

Expected<const InstrDesc &>
InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
  if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
    return *Descriptors[MCI.getOpcode()];

  if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
    return *VariantDescriptors[&MCI];

  return createInstrDescImpl(MCI);
}

Expected<std::unique_ptr<Instruction>>
InstrBuilder::createInstruction(const MCInst &MCI) {
  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
  if (!DescOrErr)
    return DescOrErr.takeError();
  const InstrDesc &D = *DescOrErr;
  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);

  // Check if this is a dependency breaking instruction.
  APInt Mask;

  bool IsZeroIdiom = false;
  bool IsDepBreaking = false;
  if (MCIA) {
    unsigned ProcID = STI.getSchedModel().getProcessorID();
    IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
    IsDepBreaking =
        IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
    if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
      NewIS->setOptimizableMove();
  }

  // Initialize Reads first.
  for (const ReadDescriptor &RD : D.Reads) {
    int RegID = -1;
    if (!RD.isImplicitRead()) {
      // Explicit read.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      if (!Op.isReg())
        continue;
      RegID = Op.getReg();
    } else {
      // Implicit read.
      RegID = RD.RegisterID;
    }

    // Skip invalid register operands.
    if (!RegID)
      continue;

    // Okay, this is a register operand. Create a ReadState for it.
    assert(RegID > 0 && "Invalid register ID found!");
    NewIS->getUses().emplace_back(RD, RegID);
    ReadState &RS = NewIS->getUses().back();

    if (IsDepBreaking) {
      // A mask of all zeroes means: explicit input operands are not
      // independent.
      if (Mask.isNullValue()) {
        if (!RD.isImplicitRead())
          RS.setIndependentFromDef();
      } else {
        // Check if this register operand is independent according to `Mask`.
        // Note that Mask may not have enough bits to describe all explicit and
        // implicit input operands. If this register operand doesn't have a
        // corresponding bit in Mask, then conservatively assume that it is
        // dependent.
        if (Mask.getBitWidth() > RD.UseIndex) {
          // Okay. This mask describes register use `RD.UseIndex`.
          if (Mask[RD.UseIndex])
            RS.setIndependentFromDef();
        }
      }
    }
  }

  // Early exit if there are no writes.
  if (D.Writes.empty())
    return std::move(NewIS);

  // Track register writes that implicitly clear the upper portion of the
  // underlying super-registers using an APInt.
  APInt WriteMask(D.Writes.size(), 0);

  // Now query the MCInstrAnalysis object to obtain information about which
  // register writes implicitly clear the upper portion of a super-register.
  if (MCIA)
    MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);

  // Initialize writes.
  unsigned WriteIndex = 0;
  for (const WriteDescriptor &WD : D.Writes) {
    unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
                                          : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is an optional definition that references NoReg.
    if (WD.IsOptionalDef && !RegID) {
      ++WriteIndex;
      continue;
    }

    assert(RegID && "Expected a valid register ID!");
    NewIS->getDefs().emplace_back(WD, RegID,
                                  /* ClearsSuperRegs */ WriteMask[WriteIndex],
                                  /* WritesZero */ IsZeroIdiom);
    ++WriteIndex;
  }

  return std::move(NewIS);
}
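
// A minimal usage sketch (illustrative; the names STI, MCII, MRI, MCIA and
// Insts are assumed to have been created by the caller):
//
//   mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get());
//   for (const MCInst &MCI : Insts) {
//     Expected<std::unique_ptr<mca::Instruction>> Inst =
//         IB.createInstruction(MCI);
//     if (!Inst)
//       return Inst.takeError(); // e.g. propagate the InstructionError.
//   }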
} // namespace mca
} // namespace llvm