X86PreTileConfig.cpp
//===-- X86PreTileConfig.cpp - Tile Register Pre-configure ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Pass to pre-configure the shapes of AMX registers
/// AMX registers need to be configured before use. The shapes of an AMX
/// register are encoded in the 1st and 2nd machine operands of the AMX pseudo
/// instructions.
///
/// The instruction ldtilecfg is used to config the shapes. It must be
/// reachable from all variable shape definitions. ldtilecfg is inserted more
/// than once if we cannot find a single dominating point for all AMX
/// instructions.
///
/// The tile config register is caller-saved according to the ABI. We need to
/// insert ldtilecfg again after a call instruction if the callee clobbers any
/// AMX registers.
///
/// This pass computes all the points where ldtilecfg needs to be inserted and
/// inserts it there. It reports an error if the reachability conditions aren't
/// met.
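///
/// For illustration, a rough sketch (not real MIR; the names below are
/// invented):
///
///     %row = ...                ; shape defs
///     %col = ...
///     ldtilecfg                 ; inserted after the last shape def, at a
///                               ; point that dominates the AMX uses below
///     %t = amx-pseudo %row, %col, ...
///     call @foo                 ; the callee may clobber tile registers
///     ldtilecfg                 ; re-inserted after the clobbering call
///     ... more AMX pseudos using %t, %row, %col ...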
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "tile-pre-config"

static void emitErrorMsg(MachineFunction &MF) {
  LLVMContext &Context = MF.getMMI().getModule()->getContext();
  Context.emitError(
      MF.getName() +
      ": Failed to config tile register, please define the shape earlier");
}

namespace {

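// A lightweight reference to a position in a machine basic block: the
// instruction MI, its parent block, and its ordinal position Pos within the
// block. It lets candidate insertion points be ordered and compared.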
struct MIRef {
  MachineInstr *MI = nullptr;
  MachineBasicBlock *MBB = nullptr;
  // A virtual position for instructions that will be inserted after MI.
  size_t Pos = 0;
  MIRef() = default;
  MIRef(MachineBasicBlock *MBB) : MBB(MBB) {
    for (auto I = MBB->begin(), E = MBB->end(); I != E && I->isPHI();
         ++I, ++Pos)
      MI = &*I;
  }
  MIRef(MachineInstr *MI)
      : MI(MI), MBB(MI->getParent()),
        Pos(std::distance(MBB->instr_begin(), ++MI->getIterator())) {}
  MIRef(MachineInstr *MI, MachineBasicBlock *MBB)
      : MI(MI), MBB(MBB),
        Pos(std::distance(MBB->instr_begin(), ++MI->getIterator())) {}
  MIRef(MachineInstr *MI, MachineBasicBlock *MBB, size_t Pos)
      : MI(MI), MBB(MBB), Pos(Pos) {}
  operator bool() const { return MBB != nullptr; }
  bool operator==(const MIRef &RHS) const {
    return MI == RHS.MI && MBB == RHS.MBB;
  }
  bool operator!=(const MIRef &RHS) const { return !(*this == RHS); }
  bool operator<(const MIRef &RHS) const {
    // Comparison between different BBs happens when inserting a MIRef into a
    // set. So we compare MBB first to make the insertion happy.
    return MBB < RHS.MBB || (MBB == RHS.MBB && Pos < RHS.Pos);
  }
  bool operator>(const MIRef &RHS) const {
    // Comparison between different BBs happens when inserting a MIRef into a
    // set. So we compare MBB first to make the insertion happy.
    return MBB > RHS.MBB || (MBB == RHS.MBB && Pos > RHS.Pos);
  }
};

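// Per-basic-block facts collected while scanning the function: the first AMX
// instruction, the last AMX-clobbering call, and the liveness flags that
// drive where ldtilecfg must be inserted.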
struct BBInfo {
  MIRef FirstAMX;
  MIRef LastCall;
  bool HasAMXRegLiveIn = false;
  bool TileCfgForbidden = false;
  bool NeedTileCfgLiveIn = false;
};

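// The pass itself: it scans the function, decides where ldtilecfg is needed,
// and materializes it together with a zero-initialized config stack slot.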
class X86PreTileConfig : public MachineFunctionPass {
  MachineRegisterInfo *MRI = nullptr;
  const MachineLoopInfo *MLI = nullptr;
  SmallSet<MachineInstr *, 8> DefVisited;
  DenseMap<MachineBasicBlock *, BBInfo> BBVisitedInfo;
  DenseMap<MachineBasicBlock *, SmallVector<MIRef, 8>> ShapeBBs;

  /// Check if the callee will clobber AMX registers.
  bool isDestructiveCall(MachineInstr &MI, BitVector UsableRegs) {
    auto Iter = llvm::find_if(
        MI.operands(), [](MachineOperand &MO) { return MO.isRegMask(); });
    if (Iter == MI.operands_end())
      return false;
    UsableRegs.clearBitsInMask(Iter->getRegMask());
    return !UsableRegs.none();
  }

  /// Check if MI is an AMX pseudo instruction.
  bool isAMXInstruction(MachineInstr &MI) {
    if (MI.isPHI() || MI.isDebugInstr() || MI.getNumOperands() < 3)
      return false;
    MachineOperand &MO = MI.getOperand(0);
    // We can simply check whether it is an AMX instruction by its def,
    // but we should exclude the old API, which uses physical registers.
    if (MO.isReg() && MO.getReg().isVirtual() &&
        MRI->getRegClass(MO.getReg())->getID() == X86::TILERegClassID) {
      collectShapeInfo(MI);
      return true;
    }
    // PTILESTOREDV is the only exception that doesn't def an AMX register.
    return MI.getOpcode() == X86::PTILESTOREDV;
  }

  /// Check if it is an edge from loop bottom to loop head.
  bool isLoopBackEdge(MachineBasicBlock *Header, MachineBasicBlock *Bottom) {
    if (!MLI->isLoopHeader(Header))
      return false;
    auto *ML = MLI->getLoopFor(Header);
    if (ML->contains(Bottom) && ML->isLoopLatch(Bottom))
      return true;

    return false;
  }

  /// Collect the shape def information for later use.
  void collectShapeInfo(MachineInstr &MI);

  /// Try to hoist shapes defined below AMX instructions.
  bool hoistShapesInBB(MachineBasicBlock *MBB, SmallVectorImpl<MIRef> &Shapes) {
    MIRef &FirstAMX = BBVisitedInfo[MBB].FirstAMX;
    auto FirstShapeBelowAMX = llvm::lower_bound(Shapes, FirstAMX);
    auto InsertPoint = FirstAMX.MI->getIterator();
    for (auto I = FirstShapeBelowAMX, E = Shapes.end(); I != E; ++I) {
      // Do not hoist instructions that access memory.
      if (I->MI->mayLoadOrStore())
        return false;
      for (auto &MO : I->MI->operands()) {
        if (MO.isDef())
          continue;
        // Do not hoist instructions if a source's def is below the first AMX
        // instruction.
        // TODO: We can handle isMoveImmediate MI here.
        if (MO.isReg() && MIRef(MRI->getVRegDef(MO.getReg())) > FirstAMX)
          return false;
        // TODO: Maybe need more checks here.
      }
      MBB->insert(InsertPoint, I->MI->removeFromParent());
    }
    // We only need to mark the last shape in the BB now.
    Shapes.clear();
    Shapes.push_back(MIRef(&*--InsertPoint, MBB));
    return true;
  }

public:
  X86PreTileConfig() : MachineFunctionPass(ID) {}

  /// Return the pass name.
  StringRef getPassName() const override {
    return "Tile Register Pre-configure";
  }

  /// X86PreTileConfig analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<MachineLoopInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  /// Clear MF-related data structures.
  void releaseMemory() override {
    ShapeBBs.clear();
    DefVisited.clear();
    BBVisitedInfo.clear();
  }

  /// Perform the ldtilecfg instruction insertion.
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;
};

} // end anonymous namespace

char X86PreTileConfig::ID = 0;

INITIALIZE_PASS_BEGIN(X86PreTileConfig, "tilepreconfig",
                      "Tile Register Pre-configure", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(X86PreTileConfig, "tilepreconfig",
                    "Tile Register Pre-configure", false, false)

void X86PreTileConfig::collectShapeInfo(MachineInstr &MI) {
  auto RecordShape = [&](MachineInstr *MI, MachineBasicBlock *MBB) {
    MIRef MIR(MI, MBB);
    auto I = llvm::lower_bound(ShapeBBs[MBB], MIR);
    if (I == ShapeBBs[MBB].end() || *I != MIR)
      ShapeBBs[MBB].insert(I, MIR);
  };

  SmallVector<Register, 8> WorkList(
      {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()});
  while (!WorkList.empty()) {
    Register R = WorkList.pop_back_val();
    MachineInstr *DefMI = MRI->getVRegDef(R);
    assert(DefMI && "R must have one defining instruction");
    MachineBasicBlock *DefMBB = DefMI->getParent();
    if (DefMI->isMoveImmediate() || !DefVisited.insert(DefMI).second)
      continue;
    if (DefMI->isPHI()) {
      for (unsigned I = 1; I < DefMI->getNumOperands(); I += 2)
        if (isLoopBackEdge(DefMBB, DefMI->getOperand(I + 1).getMBB()))
          RecordShape(DefMI, DefMBB); // In this case, the PHI is also a shape def.
        else
          WorkList.push_back(DefMI->getOperand(I).getReg());
    } else {
      RecordShape(DefMI, DefMBB);
    }
  }
}

bool X86PreTileConfig::runOnMachineFunction(MachineFunction &MF) {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo *TII = ST.getInstrInfo();
  const TargetRegisterInfo *TRI = ST.getRegisterInfo();
  const TargetRegisterClass *RC = TRI->getRegClass(X86::TILERegClassID);
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  BitVector AMXRegs(TRI->getNumRegs());
  for (unsigned I = 0; I < RC->getNumRegs(); I++)
    AMXRegs.set(X86::TMM0 + I);

  // Iterate over MF to collect information.
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  SmallSet<MIRef, 8> CfgNeedInsert;
  SmallVector<MachineBasicBlock *, 8> CfgLiveInBBs;
  for (auto &MBB : MF) {
    size_t Pos = 0;
    for (auto &MI : MBB) {
      ++Pos;
      if (isAMXInstruction(MI)) {
        // If there's a call before the AMX instruction, we need to reload the
        // tile config.
        if (BBVisitedInfo[&MBB].LastCall)
          CfgNeedInsert.insert(BBVisitedInfo[&MBB].LastCall);
        else // Otherwise, we need the tile config to be live into this BB.
          BBVisitedInfo[&MBB].NeedTileCfgLiveIn = true;
        // Always record the first AMX in case there's a shape def after it.
        if (!BBVisitedInfo[&MBB].FirstAMX)
          BBVisitedInfo[&MBB].FirstAMX = MIRef(&MI, &MBB, Pos);
      } else if (MI.isCall() && isDestructiveCall(MI, AMXRegs)) {
        // Record the call only if the callee clobbers AMX registers.
        BBVisitedInfo[&MBB].LastCall = MIRef(&MI, &MBB, Pos);
      }
    }
    if (BBVisitedInfo[&MBB].NeedTileCfgLiveIn) {
      if (&MBB == &MF.front())
        CfgNeedInsert.insert(MIRef(&MBB));
      else
        CfgLiveInBBs.push_back(&MBB);
    }
    if (BBVisitedInfo[&MBB].FirstAMX || BBVisitedInfo[&MBB].HasAMXRegLiveIn)
      for (auto *Succ : MBB.successors())
        if (!isLoopBackEdge(Succ, &MBB))
          BBVisitedInfo[Succ].HasAMXRegLiveIn = true;
  }

  // Update NeedTileCfgLiveIn for predecessors.
  while (!CfgLiveInBBs.empty()) {
    MachineBasicBlock *MBB = CfgLiveInBBs.pop_back_val();
    for (auto *Pred : MBB->predecessors()) {
      if (BBVisitedInfo[Pred].LastCall) {
        CfgNeedInsert.insert(BBVisitedInfo[Pred].LastCall);
      } else if (!BBVisitedInfo[Pred].NeedTileCfgLiveIn) {
        BBVisitedInfo[Pred].NeedTileCfgLiveIn = true;
        if (Pred == &MF.front())
          CfgNeedInsert.insert(MIRef(Pred));
        else
          CfgLiveInBBs.push_back(Pred);
      }
    }
  }

  // There's no AMX instruction if we didn't find a tile config live-in point.
  if (CfgNeedInsert.empty())
    return false;
  X86FI->setHasVirtualTileReg(true);

  // Avoid inserting ldtilecfg before any shape defs.
  SmallVector<MachineBasicBlock *, 8> WorkList;
  for (auto &I : ShapeBBs) {
    // TODO: We can hoist shapes across BBs here.
    if (BBVisitedInfo[I.first].HasAMXRegLiveIn) {
      // We are not able to config the tile registers since the shape to
      // config is not defined yet. Emit an error message and bail out; the
      // function will not config the tile registers.
      emitErrorMsg(MF);
      return false;
    }
    if (BBVisitedInfo[I.first].FirstAMX &&
        BBVisitedInfo[I.first].FirstAMX < I.second.back() &&
        !hoistShapesInBB(I.first, I.second)) {
      emitErrorMsg(MF);
      return false;
    }
    WorkList.push_back(I.first);
  }
  while (!WorkList.empty()) {
    MachineBasicBlock *MBB = WorkList.pop_back_val();
    for (auto *Pred : MBB->predecessors()) {
      if (!BBVisitedInfo[Pred].TileCfgForbidden && !isLoopBackEdge(MBB, Pred)) {
        BBVisitedInfo[Pred].TileCfgForbidden = true;
        WorkList.push_back(Pred);
      }
    }
  }
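  // At this point, TileCfgForbidden marks every block from which some shape
  // def is still reachable (ignoring loop back edges); ldtilecfg must not be
  // placed in those blocks.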

  DebugLoc DL;
  SmallSet<MIRef, 8> VisitedOrInserted;
  int SS = MF.getFrameInfo().CreateStackObject(
      ST.getTileConfigSize(), ST.getTileConfigAlignment(), false);

  // Try to insert ldtilecfg at the tile config live-in points.
  for (const auto &I : CfgNeedInsert) {
    SmallSet<MIRef, 8> InsertPoints;
    SmallVector<MIRef, 8> WorkList({I});
    while (!WorkList.empty()) {
      MIRef I = WorkList.pop_back_val();
      if (!VisitedOrInserted.count(I)) {
        if (!BBVisitedInfo[I.MBB].TileCfgForbidden) {
          // If all shape defs reach this BB, stop sinking and try to insert.
          InsertPoints.insert(I);
        } else {
          // Avoid visiting the BB more than once.
          VisitedOrInserted.insert(I);
          // Sink the insertion point along successors with NeedTileCfgLiveIn
          // set, since not all shape defs reach MBB yet.
          for (auto *Succ : I.MBB->successors())
            if (BBVisitedInfo[Succ].NeedTileCfgLiveIn)
              WorkList.push_back(MIRef(Succ));
        }
      }
    }

    // A given point might be forked into several because the shape conditions
    // are not met at the original one.
    for (MIRef I : InsertPoints) {
      // Make sure we insert ldtilecfg after the last shape def in the MBB.
      if (ShapeBBs.count(I.MBB) && I < ShapeBBs[I.MBB].back())
        I = ShapeBBs[I.MBB].back();
      // The same MBB may be sunk to more than once. Record it to avoid
      // inserting multiple times.
      if (VisitedOrInserted.insert(I).second) {
        auto II = I.MI ? I.MI->getIterator() : I.MBB->instr_begin();
        addFrameReference(BuildMI(*I.MBB, ++II, DL, TII->get(X86::PLDTILECFGV)),
                          SS);
      }
    }
  }

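  // The config area is zero-initialized below and its first byte (the
  // palette) is set to 1; the per-tile shape fields are left as zeros here.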
  // Zero the stack slot.
  MachineBasicBlock &MBB = MF.front();
  MachineInstr *MI = &*MBB.begin();
  if (ST.hasAVX512()) {
    Register Zmm = MRI->createVirtualRegister(&X86::VR512RegClass);
    BuildMI(MBB, MI, DL, TII->get(X86::AVX512_512_SET0), Zmm);
    addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::VMOVUPSZmr)), SS)
        .addReg(Zmm);
  } else if (ST.hasAVX2()) {
    Register Ymm = MRI->createVirtualRegister(&X86::VR256RegClass);
    BuildMI(MBB, MI, DL, TII->get(X86::AVX_SET0), Ymm);
    addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::VMOVUPSYmr)), SS)
        .addReg(Ymm);
    addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::VMOVUPSYmr)), SS, 32)
        .addReg(Ymm);
  } else {
    assert(ST.hasSSE2() && "AMX should assume SSE2 enabled");
    unsigned StoreOpc = ST.hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr;
    Register Xmm = MRI->createVirtualRegister(&X86::VR128RegClass);
    BuildMI(MBB, MI, DL, TII->get(X86::V_SET0), Xmm);
    addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), SS).addReg(Xmm);
    addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), SS, 16)
        .addReg(Xmm);
    addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), SS, 32)
        .addReg(Xmm);
    addFrameReference(BuildMI(MBB, MI, DL, TII->get(StoreOpc)), SS, 48)
        .addReg(Xmm);
  }
  // Fill in the palette first.
  addFrameReference(BuildMI(MBB, MI, DL, TII->get(X86::MOV8mi)), SS).addImm(1);

  return true;
}

FunctionPass *llvm::createX86PreTileConfigPass() {
  return new X86PreTileConfig();
}