#define DEBUG_TYPE "lower-matrix-intrinsics"

STATISTIC(FlattenedMatrices, "Number of matrix flattenings");
STATISTIC(ReshapedMatrices, "Number of matrix reshapes");
static cl::opt<bool>
    FuseMatrix("fuse-matrix", cl::init(true), cl::Hidden,
               cl::desc("Enable/disable fusing matrix instructions."));
// TODO: Allow and use non-square tiles.
static cl::opt<unsigned> TileSize(
    "fuse-matrix-tile-size", cl::init(4), cl::Hidden,
    cl::desc(
        "Tile size for matrix instruction fusion using square-shaped tiles."));
static cl::opt<unsigned> TileLoopsThreshold(
    "fuse-matrix-loops-threshold", cl::init(200), cl::Hidden,
    cl::desc("Generate loop nests for tiling when expected "
             "number of operations exceeds threshold."));
static cl::opt<bool> ForceFusion(
    "force-fuse-matrix", cl::init(false), cl::Hidden,
    cl::desc("Force matrix instruction fusion even if not profitable."));
static cl::opt<bool> AllowContractEnabled(
    "matrix-allow-contract", cl::init(false), cl::Hidden,
    cl::desc("Allow the use of FMAs if available and profitable. This may "
             "result in different results, due to less rounding error."));

static cl::opt<bool>
    VerifyShapeInfo("verify-matrix-shapes", cl::Hidden,
                    cl::desc("Enable/disable matrix shape verification."),
                    cl::init(false));

enum class MatrixLayoutTy { ColumnMajor, RowMajor };

static cl::opt<MatrixLayoutTy> MatrixLayout(
    "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
    cl::desc("Sets the default matrix layout"),
    cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
                          "Use column-major layout"),
               clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
                          "Use row-major layout")));

static cl::opt<unsigned> SplitMatmulRemainderOverThreshold(
    "matrix-split-matmul-remainder-over-threshold", cl::Hidden,
    cl::desc("Illegal remainder vectors over this size in bits should be split "
             "in the inner loop of matmul"),
    cl::init(0));

static cl::opt<bool> PrintAfterTransposeOpt("matrix-print-after-transpose-opt",
                                            cl::init(false));
/// Return true if V is a splat of a value (which is used when multiplying a
/// matrix with a scalar).
static bool isSplat(Value *V) {
  if (auto *SV = dyn_cast<ShuffleVectorInst>(V))
    return SV->isZeroEltSplat();
  return false;
}

/// Match any mul operation (fp or integer).
template <typename LTy, typename RTy>
static auto m_AnyMul(const LTy &L, const RTy &R) {
  return m_CombineOr(m_Mul(L, R), m_FMul(L, R));
}

/// Match any add operation (fp or integer).
template <typename LTy, typename RTy>
static auto m_AnyAdd(const LTy &L, const RTy &R) {
  return m_CombineOr(m_Add(L, R), m_FAdd(L, R));
}

static Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
                                unsigned NumElements, Type *EltType,
                                IRBuilder<> &Builder) {
  assert((!isa<ConstantInt>(Stride) ||
          cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
         "Stride must be >= the number of elements in the result vector.");

  // Compute the start of the vector with index VecIdx as VecIdx * Stride.
  Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");

  // Get pointer to the start of the selected vector. Skip GEP creation,
  // if we select vector 0.
  if (isa<ConstantInt>(VecStart) && cast<ConstantInt>(VecStart)->isZero())
    VecStart = BasePtr;
  else
    VecStart = Builder.CreateGEP(EltType, BasePtr, VecStart, "vec.gep");

  return VecStart;
}
/// ShapeInfo describes the shape of a matrix.
struct ShapeInfo {
  unsigned NumRows;
  unsigned NumColumns;

  bool IsColumnMajor;

  ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0)
      : NumRows(NumRows), NumColumns(NumColumns),
        IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}

  ShapeInfo(Value *NumRows, Value *NumColumns)
      : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(),
                  cast<ConstantInt>(NumColumns)->getZExtValue()) {}

  bool operator==(const ShapeInfo &other) {
    return NumRows == other.NumRows && NumColumns == other.NumColumns;
  }
  bool operator!=(const ShapeInfo &other) { return !(*this == other); }

  /// Returns true if shape-information is defined, meaning both dimensions
  /// are != 0.
  operator bool() const {
    assert(NumRows == 0 || NumColumns != 0);
    return NumRows != 0;
  }

  unsigned getStride() const {
    if (IsColumnMajor)
      return NumRows;
    return NumColumns;
  }

  unsigned getNumVectors() const {
    if (IsColumnMajor)
      return NumColumns;
    return NumRows;
  }

  /// Returns the transposed shape.
  ShapeInfo t() const { return ShapeInfo(NumColumns, NumRows); }

  friend raw_ostream &operator<<(raw_ostream &OS, ShapeInfo SI);
};

raw_ostream &operator<<(raw_ostream &OS, ShapeInfo SI) {
  return OS << SI.NumRows << 'x' << SI.NumColumns;
}
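// Usage sketch (illustrative values, default column-major layout):
//   ShapeInfo SI(3, 4);  // a 3x4 matrix, printed as "3x4"
//   SI.getStride();      // 3 - elements between starts of adjacent columns
//   SI.getNumVectors();  // 4 - one vector per column
//   SI.t();              // the transposed 4x3 shape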
/// Return true if the instruction \p V preserves the shape of its operands.
static bool isShapePreserving(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (auto *Cast = dyn_cast<CastInst>(V)) {
    switch (Cast->getOpcode()) {
    case llvm::Instruction::Trunc:
    case llvm::Instruction::ZExt:
    case llvm::Instruction::SExt:
    case llvm::Instruction::FPToUI:
    case llvm::Instruction::FPToSI:
    case llvm::Instruction::UIToFP:
    case llvm::Instruction::SIToFP:
    case llvm::Instruction::FPTrunc:
    case llvm::Instruction::FPExt:
      return true;
    case llvm::Instruction::AddrSpaceCast:
    case CastInst::PtrToAddr:
    case CastInst::PtrToInt:
    case CastInst::IntToPtr:
      return false;
    case CastInst::BitCast: {
      if (auto *SrcVTy = dyn_cast<FixedVectorType>(Cast->getSrcTy()))
        if (auto *DestVTy = dyn_cast<FixedVectorType>(Cast->getDestTy()))
          return SrcVTy->getNumElements() == DestVTy->getNumElements();
      return false;
    }
    case llvm::Instruction::CastOpsEnd:
      llvm_unreachable("not an actual cast op");
    }
    llvm_unreachable("unhandled cast opcode");
  }

  if (auto *II = dyn_cast<IntrinsicInst>(V))
    switch (II->getIntrinsicID()) {
    case Intrinsic::abs:
    case Intrinsic::fabs:
      return true;
    default:
      return false;
    }

  switch (I->getOpcode()) {
  case Instruction::PHI:
  case Instruction::FNeg:
    return true;
  default:
    return I->isBinaryOp();
  }
}

/// Return an iterator over the operands of \p I that should share shape
/// information with \p I.
static iterator_range<Use *> getShapedOperandsForInst(Instruction *I) {
  assert(isShapePreserving(I) &&
         "Can't retrieve shaped operands for an instruction that does not "
         "preserve shape information");
  auto Ops = I->operands();
  return Ops;
}
/// Return the ShapeInfo for the result of \p I, if it can be determined.
static std::optional<ShapeInfo>
computeShapeInfoForInst(Instruction *I,
                        const DenseMap<Value *, ShapeInfo> &ShapeMap) {
  Value *M;
  Value *N;
  Value *K;
  if (match(I, m_Intrinsic<Intrinsic::matrix_multiply>(
                   m_Value(), m_Value(), m_Value(M), m_Value(N), m_Value(K))))
    return ShapeInfo(M, K);
  if (match(I, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(), m_Value(M),
                                                        m_Value(N))))
    // Flip dimensions.
    return ShapeInfo(N, M);
  if (match(I, m_Intrinsic<Intrinsic::matrix_column_major_store>(
                   m_Value(), m_Value(), m_Value(), m_Value(), m_Value(M),
                   m_Value(N))))
    return ShapeInfo(N, M);
  if (match(I, m_Intrinsic<Intrinsic::matrix_column_major_load>(
                   m_Value(), m_Value(), m_Value(), m_Value(M), m_Value(N))))
    return ShapeInfo(M, N);
  Value *MatrixA;
  if (match(I, m_Store(m_Value(MatrixA), m_Value()))) {
    auto OpShape = ShapeMap.find(MatrixA);
    if (OpShape != ShapeMap.end())
      return OpShape->second;
  }

  if (isShapePreserving(I)) {
    auto ShapedOps = getShapedOperandsForInst(I);
    for (auto &Op : ShapedOps) {
      auto OpShape = ShapeMap.find(Op.get());
      if (OpShape != ShapeMap.end())
        return OpShape->second;
    }
  }
  return std::nullopt;
}
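// Shape inference example for the matchers above: given
//   %c = call <6 x double> @llvm.matrix.multiply.v6f64.v8f64.v12f64(
//            <8 x double> %a, <12 x double> %b, i32 2, i32 4, i32 3)
// the operands are 2x4 and 4x3, so the result shape is ShapeInfo(2, 3);
// a transpose with shape arguments (M, N) yields the flipped ShapeInfo(N, M).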
class LowerMatrixIntrinsics {
  Function &Func;
  const DataLayout &DL;
  const TargetTransformInfo &TTI;
  FunctionAnalysisManager *AM = nullptr;
  AliasAnalysis *AA = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;
  OptimizationRemarkEmitter *ORE = nullptr;

  /// Contains estimates how many stores, loads and compute operations are
  /// required to lower a matrix operation.
  struct OpInfoTy {
    /// Number of stores emitted to generate this matrix.
    unsigned NumStores = 0;
    /// Number of loads emitted to generate this matrix.
    unsigned NumLoads = 0;
    /// Number of compute operations emitted to generate this matrix.
    unsigned NumComputeOps = 0;
    /// Most of the time transposes can be fused with matrix multiplies or can
    /// be folded away via algebraic simplifications.
    unsigned NumExposedTransposes = 0;

    OpInfoTy &operator+=(const OpInfoTy &RHS) {
      NumStores += RHS.NumStores;
      NumLoads += RHS.NumLoads;
      NumComputeOps += RHS.NumComputeOps;
      NumExposedTransposes += RHS.NumExposedTransposes;
      return *this;
    }
  };

  /// Wrapper class representing a matrix as a set of vectors, either in row
  /// or column major layout.
  class MatrixTy {
    SmallVector<Value *, 16> Vectors;

    OpInfoTy OpInfo;

    bool IsColumnMajor = true;

  public:
    MatrixTy() : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
    MatrixTy(ArrayRef<Value *> Vectors)
        : Vectors(Vectors),
          IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
    MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy)
        : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {
      unsigned D = isColumnMajor() ? NumColumns : NumRows;
      for (unsigned J = 0; J < D; ++J)
        addVector(PoisonValue::get(FixedVectorType::get(
            EltTy, isColumnMajor() ? NumRows : NumColumns)));
    }
    Value *getVector(unsigned i) const { return Vectors[i]; }
    Value *getColumn(unsigned i) const {
      assert(isColumnMajor() && "only supported for column-major matrixes");
      return Vectors[i];
    }
    Value *getRow(unsigned i) const {
      assert(!isColumnMajor() && "only supported for row-major matrixes");
      return Vectors[i];
    }

    void setVector(unsigned i, Value *V) { Vectors[i] = V; }

    Type *getElementType() const { return getVectorTy()->getElementType(); }

    unsigned getNumVectors() const {
      if (isColumnMajor())
        return getNumColumns();
      return getNumRows();
    }

    unsigned getNumColumns() const {
      if (isColumnMajor())
        return Vectors.size();
      assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
      return getVectorTy()->getNumElements();
    }
    unsigned getNumRows() const {
      if (isColumnMajor()) {
        assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
        return getVectorTy()->getNumElements();
      }
      return Vectors.size();
    }

    void addVector(Value *V) { Vectors.push_back(V); }
    FixedVectorType *getColumnTy() {
      assert(isColumnMajor() && "only supported for column-major matrixes");
      return getVectorTy();
    }

    FixedVectorType *getVectorTy() const {
      return cast<FixedVectorType>(Vectors[0]->getType());
    }

    iterator_range<SmallVector<Value *, 8>::iterator> columns() {
      assert(isColumnMajor() &&
             "columns() only supported for column-major matrixes");
      return make_range(Vectors.begin(), Vectors.end());
    }

    iterator_range<SmallVector<Value *, 8>::iterator> vectors() {
      return make_range(Vectors.begin(), Vectors.end());
    }

    /// Embed the vectors of the matrix into a flat vector by concatenating
    /// them.
    Value *embedInVector(IRBuilder<> &Builder) const {
      return Vectors.size() == 1 ? Vectors[0]
                                 : concatenateVectors(Builder, Vectors);
    }
    MatrixTy &addNumLoads(unsigned N) {
      OpInfo.NumLoads += N;
      return *this;
    }

    void setNumLoads(unsigned N) { OpInfo.NumLoads = N; }

    MatrixTy &addNumStores(unsigned N) {
      OpInfo.NumStores += N;
      return *this;
    }

    MatrixTy &addNumExposedTransposes(unsigned N) {
      OpInfo.NumExposedTransposes += N;
      return *this;
    }

    MatrixTy &addNumComputeOps(unsigned N) {
      OpInfo.NumComputeOps += N;
      return *this;
    }

    unsigned getNumStores() const { return OpInfo.NumStores; }
    unsigned getNumLoads() const { return OpInfo.NumLoads; }
    unsigned getNumComputeOps() const { return OpInfo.NumComputeOps; }

    const OpInfoTy &getOpInfo() const { return OpInfo; }

    bool isColumnMajor() const { return IsColumnMajor; }

    unsigned getStride() const {
      if (isColumnMajor())
        return getNumRows();
      return getNumColumns();
    }

    ShapeInfo shape() const { return {getNumRows(), getNumColumns()}; }

    /// Extract a vector of \p NumElts starting at index (\p I, \p J). If the
    /// matrix is column-major, the result vector is extracted from a column
    /// vector, otherwise from a row vector.
    Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
                         IRBuilder<> &Builder) const {
      Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
      assert(cast<FixedVectorType>(Vec->getType())->getNumElements() >=
                 NumElts &&
             "Extracted vector will contain poison values");
      return Builder.CreateShuffleVector(
          Vec, createSequentialMask(isColumnMajor() ? I : J, NumElts, 0),
          "block");
    }
  };
  /// Maps instructions to their shape information. The shape information
  /// describes the shape to be used while lowering.
  DenseMap<Value *, ShapeInfo> ShapeMap;

  /// List of instructions to remove. While lowering, we are not replacing all
  /// users of a lowered instruction, if shape information is available; those
  /// need to be removed after we finished lowering.
  SmallVector<Instruction *, 16> ToRemove;

  /// Map from instructions to their produced column matrix.
  MapVector<Value *, MatrixTy> Inst2ColumnMatrix;

private:
  static FastMathFlags getFastMathFlags(Instruction *Inst) {
    FastMathFlags FMF;
    if (isa<FPMathOperator>(*Inst))
      FMF = Inst->getFastMathFlags();
    FMF.setAllowContract(AllowContractEnabled || FMF.allowContract());
    return FMF;
  }

public:
  LowerMatrixIntrinsics(Function &F, TargetTransformInfo &TTI,
                        FunctionAnalysisManager *AM)
      : Func(F), DL(F.getDataLayout()), TTI(TTI), AM(AM) {}

  unsigned getNumOps(Type *VT) {
    assert(isa<FixedVectorType>(VT) && "Expected vector type");
    return getNumOps(VT->getScalarType(),
                     cast<FixedVectorType>(VT)->getNumElements());
  }

  /// Is this the minimal version executed in the backend pipelines.
  bool isMinimal() const { return !DT; }

  /// Return the estimated number of vector ops required for an operation on
  /// \p ST * N.
  unsigned getNumOps(Type *ST, unsigned N) {
    return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedValue() /
                     double(TTI.getRegisterBitWidth(
                                   TargetTransformInfo::RGK_FixedWidthVector)
                                .getFixedValue()));
  }
  /// Estimate the number of native vector ops required to lower a multiply of
  /// an R x M matrix with an M x C matrix.
  unsigned getNumNativeVectorOps(Type *EltType, unsigned R, unsigned M,
                                 unsigned C) {
    unsigned NumFMAs = C * getNumOps(EltType, R) * M;
    unsigned NumALoads = getNumOps(EltType, R) * M;
    unsigned NumBLoads = getNumOps(EltType, M) * C;
    unsigned NumStores = getNumOps(EltType, R) * C;
    return NumFMAs + NumALoads + NumBLoads + NumStores;
  }
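  // Worked example (assumed target: 128-bit vector registers and float
  // elements, so getNumOps(float, 4) == 1): for R = M = C = 4,
  //   NumFMAs   = 4 * 1 * 4 = 16
  //   NumALoads = 1 * 4     = 4
  //   NumBLoads = 1 * 4     = 4
  //   NumStores = 1 * 4     = 4
  // for an estimate of 28 native vector ops for the 4x4x4 multiply.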
  /// Return the matrix value for \p MatrixVal with shape \p SI, splitting the
  /// flat vector if necessary.
  MatrixTy getMatrix(Value *MatrixVal, const ShapeInfo &SI,
                     IRBuilder<> &Builder) {
    auto *VType = cast<FixedVectorType>(MatrixVal->getType());
    assert(VType->getNumElements() == SI.NumRows * SI.NumColumns &&
           "The vector size must match the number of matrix elements");

    // Check if we lowered MatrixVal using shape information. In that case,
    // return the existing matrix, if it matches the requested shape
    // information. If there is a mismatch, embed the result in a flat vector
    // and split it later.
    auto Found = Inst2ColumnMatrix.find(MatrixVal);
    if (Found != Inst2ColumnMatrix.end()) {
      MatrixTy &M = Found->second;
      // Return the found matrix, if its shape matches the requested shape
      // information.
      if (SI.NumRows == M.getNumRows() && SI.NumColumns == M.getNumColumns())
        return M;

      MatrixVal = M.embedInVector(Builder);
    }

    // Otherwise split MatrixVal into stride-sized chunks.
    SmallVector<Value *, 16> SplitVecs;
    for (unsigned MaskStart = 0; MaskStart < VType->getNumElements();
         MaskStart += SI.getStride()) {
      Value *V = Builder.CreateShuffleVector(
          MatrixVal, createSequentialMask(MaskStart, SI.getStride(), 0),
          "split");
      SplitVecs.push_back(V);
    }

    LLVM_DEBUG(if (Instruction *Inst = dyn_cast<Instruction>(MatrixVal)) {
      if (Found != Inst2ColumnMatrix.end()) {
        dbgs() << "matrix reshape from " << Found->second.shape() << " to "
               << SI << " using at least " << SplitVecs.size()
               << " shuffles on behalf of:\n"
               << *Inst << '\n';
        ReshapedMatrices++;
      } else if (!ShapeMap.contains(MatrixVal)) {
        dbgs() << "splitting a " << SI << " matrix with " << SplitVecs.size()
               << " shuffles because we do not have a shape-aware lowering for "
                  "its def:\n"
               << *Inst << '\n';
      }
    });

    return {SplitVecs};
  }
  /// Set the shape of \p V to \p Shape and return true if it was recorded.
  bool setShapeInfo(Value *V, ShapeInfo Shape) {
    assert(Shape && "Shape not set");
    if (isa<UndefValue>(V) || !supportsShapeInfo(V))
      return false;

    auto SIter = ShapeMap.find(V);
    if (SIter != ShapeMap.end()) {
      if (VerifyShapeInfo && (SIter->second.NumRows != Shape.NumRows ||
                              SIter->second.NumColumns != Shape.NumColumns)) {
        errs() << "Conflicting shapes (" << SIter->second.NumRows << "x"
               << SIter->second.NumColumns << " vs " << Shape.NumRows << "x"
               << Shape.NumColumns << ") for " << *V << "\n";
        report_fatal_error(
            "Matrix shape verification failed, compilation aborted!");
      }

      LLVM_DEBUG(dbgs() << "  not overriding existing shape: "
                        << SIter->second.NumRows << " "
                        << SIter->second.NumColumns << " for " << *V << "\n");
      return false;
    }

    ShapeMap.insert({V, Shape});
    LLVM_DEBUG(dbgs() << "  " << Shape.NumRows << " x " << Shape.NumColumns
                      << " for " << *V << "\n");
    return true;
  }

  /// Returns true if shape information can be used for \p V. The supported
  /// instructions must match the instructions that can be lowered by this
  /// pass.
  bool supportsShapeInfo(Value *V) {
    Instruction *Inst = dyn_cast<Instruction>(V);
    if (!Inst)
      return false;

    if (auto *II = dyn_cast<IntrinsicInst>(Inst))
      switch (II->getIntrinsicID()) {
      case Intrinsic::matrix_multiply:
      case Intrinsic::matrix_transpose:
      case Intrinsic::matrix_column_major_load:
      case Intrinsic::matrix_column_major_store:
        return true;
      default:
        break;
      }
    return isShapePreserving(V) || isa<StoreInst>(V) || isa<LoadInst>(V);
  }
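  // Example of the verification path above: with -verify-matrix-shapes=true,
  // recording a 2x3 shape for a value already mapped as 3x2 prints
  //   Conflicting shapes (3x2 vs 2x3) for <value>
  // and aborts via report_fatal_error; without the flag, the first recorded
  // shape wins and the conflicting update is ignored.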
  /// Propagate the shape information of instructions to their users.
  /// The work list contains instructions for which we can compute the shape,
  /// either based on the information provided by matrix intrinsics or known
  /// shapes of operands.
  SmallVector<Instruction *, 32>
  propagateShapeForward(SmallVectorImpl<Instruction *> &WorkList) {
    SmallVector<Instruction *, 32> NewWorkList;
    // Pop an element for which we guaranteed to have at least one of the
    // operand shapes. Add the shape for this and then add users to the work
    // list.
    while (!WorkList.empty()) {
      Instruction *Inst = WorkList.pop_back_val();

      // New entry, set the value and insert operands.
      bool Propagate = false;
      if (auto SI = computeShapeInfoForInst(Inst, ShapeMap))
        Propagate = setShapeInfo(Inst, *SI);

      if (Propagate) {
        NewWorkList.push_back(Inst);
        for (auto *User : Inst->users())
          if (ShapeMap.count(User) == 0)
            if (auto *InstUser = dyn_cast<Instruction>(User))
              WorkList.push_back(InstUser);
      }
    }

    return NewWorkList;
  }

  /// Propagate the shape to operands of instructions with shape information.
  /// \p WorkList contains the instructions for which we already know the
  /// shape.
  SmallVector<Instruction *, 32>
  propagateShapeBackward(SmallVectorImpl<Instruction *> &WorkList) {
    SmallVector<Instruction *, 32> NewWorkList;

    auto pushInstruction = [](Value *V,
                              SmallVectorImpl<Instruction *> &WorkList) {
      if (auto *I = dyn_cast<Instruction>(V))
        WorkList.push_back(I);
    };

    // Pop an element with known shape. Traverse the operands, if their shape
    // derives from the result shape and is unknown, add it and add them to
    // the worklist.
    while (!WorkList.empty()) {
      Value *V = WorkList.pop_back_val();
      size_t BeforeProcessingV = WorkList.size();

      Value *MatrixA, *MatrixB, *M, *N, *K;
      if (match(V, m_Intrinsic<Intrinsic::matrix_multiply>(
                       m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
                       m_Value(N), m_Value(K)))) {
        if (setShapeInfo(MatrixA, {M, N}))
          pushInstruction(MatrixA, WorkList);

        if (setShapeInfo(MatrixB, {N, K}))
          pushInstruction(MatrixB, WorkList);
      } else if (match(V, m_Intrinsic<Intrinsic::matrix_transpose>(
                              m_Value(MatrixA), m_Value(M), m_Value(N)))) {
        // Flip dimensions.
        if (setShapeInfo(MatrixA, {M, N}))
          pushInstruction(MatrixA, WorkList);
      } else if (match(V, m_Intrinsic<Intrinsic::matrix_column_major_store>(
                              m_Value(MatrixA), m_Value(), m_Value(),
                              m_Value(), m_Value(M), m_Value(N)))) {
        if (setShapeInfo(MatrixA, {M, N})) {
          pushInstruction(MatrixA, WorkList);
        }
      } else if (isShapePreserving(V)) {
        // Propagate the result shape to all shaped operands.
        auto ShapedOps = getShapedOperandsForInst(cast<Instruction>(V));
        ShapeInfo Shape = ShapeMap[V];
        for (Use &U : ShapedOps) {
          if (setShapeInfo(U.get(), Shape))
            pushInstruction(U.get(), WorkList);
        }
      }
      // After we discovered new shape info for new instructions in the
      // worklist, we use their users as seeds for the next round of forward
      // propagation.
      for (size_t I = BeforeProcessingV; I != WorkList.size(); I++)
        for (User *U : WorkList[I]->users())
          if (U != V && isa<Instruction>(U))
            NewWorkList.push_back(cast<Instruction>(U));
    }
    return NewWorkList;
  }
  /// Create new transposes of both operands and apply \p Operation to the
  /// transposed values.
  Instruction *distributeTransposes(
      Value *Op0, ShapeInfo Shape0, Value *Op1, ShapeInfo Shape1,
      MatrixBuilder &Builder,
      function_ref<Instruction *(Value *, ShapeInfo, Value *, ShapeInfo)>
          Operation) {
    Value *T0 = Builder.CreateMatrixTranspose(
        Op0, Shape0.NumRows, Shape0.NumColumns, Op0->getName() + "_t");
    // We are being run after shape prop, add shape for newly created
    // instructions so that we lower them later.
    setShapeInfo(T0, Shape0.t());
    Value *T1 = Builder.CreateMatrixTranspose(
        Op1, Shape1.NumRows, Shape1.NumColumns, Op1->getName() + "_t");
    setShapeInfo(T1, Shape1.t());
    return Operation(T0, Shape0.t(), T1, Shape1.t());
  }
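  // The rewrites built on this helper apply the standard transpose
  // identities (restated here for reference):
  //   (A * B)^T == B^T * A^T      -- multiply: operands swap and transpose
  //   (A + B)^T == A^T + B^T      -- elementwise ops distribute directly
  // Sinking a transpose this way can let it cancel against transposes that
  // already exist on the operands.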
  void eraseFromParentAndRemoveFromShapeMap(Instruction *Inst) {
    ShapeMap.erase(Inst);
    Inst->eraseFromParent();
  }

  /// Erase \p V from \p BB and move \p II forward to avoid invalidating
  /// iterators.
  void eraseFromParentAndMove(Value *V, BasicBlock::reverse_iterator &II,
                              BasicBlock &BB) {
    auto *Inst = cast<Instruction>(V);
    // Still used, don't erase.
    if (!Inst->use_empty())
      return;
    if (II != BB.rend() && Inst == &*II)
      ++II;
    eraseFromParentAndRemoveFromShapeMap(Inst);
  }

  /// Add a new entry to ShapeMap for \p New with \p Old's shape info, erase
  /// the entry for \p Old and replace all uses of \p Old with \p New.
  void updateShapeAndReplaceAllUsesWith(Instruction &Old, Value *New) {
    // We need to remove Old from the ShapeMap otherwise RAUW will replace it
    // with New. We should only add New if it supportsShapeInfo, so we insert
    // it conditionally instead.
    auto S = ShapeMap.find(&Old);
    if (S != ShapeMap.end()) {
      ShapeMap.erase(S);
      if (supportsShapeInfo(New))
        ShapeMap.insert({New, S->second});
    }
    Old.replaceAllUsesWith(New);
  }
  /// Sink a top-level transpose inside matmuls and adds. This pattern emerges
  /// when users write permuted accesses.
  Instruction *sinkTranspose(Instruction &I, BasicBlock::reverse_iterator &II,
                             bool &Changed) {
    BasicBlock &BB = *I.getParent();
    IRBuilder<> IB(&I);
    MatrixBuilder Builder(IB);

    Value *TA, *TAMA, *TAMB;
    ConstantInt *R, *K, *C;
    if (!match(&I, m_Intrinsic<Intrinsic::matrix_transpose>(
                       m_Value(TA), m_ConstantInt(R), m_ConstantInt(C))))
      return nullptr;

    // Transpose of a transpose is a nop.
    Value *TATA;
    if (match(TA, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(TATA)))) {
      updateShapeAndReplaceAllUsesWith(I, TATA);
      eraseFromParentAndMove(&I, II, BB);
      eraseFromParentAndMove(TA, II, BB);
      Changed = true;
      return nullptr;
    }

    // k^T -> k
    if (isSplat(TA)) {
      updateShapeAndReplaceAllUsesWith(I, TA);
      eraseFromParentAndMove(&I, II, BB);
      Changed = true;
      return nullptr;
    }

    // (A * B)^t -> B^t * A^t
    // RxK KxC      CxK   KxR  CxR
    if (match(TA, m_Intrinsic<Intrinsic::matrix_multiply>(
                      m_Value(TAMA), m_Value(TAMB), m_ConstantInt(R),
                      m_ConstantInt(K), m_ConstantInt(C)))) {
      auto NewInst = distributeTransposes(
          TAMB, {K, C}, TAMA, {R, K}, Builder,
          [&](Value *T0, ShapeInfo Shape0, Value *T1, ShapeInfo Shape1) {
            return Builder.CreateMatrixMultiply(T0, T1, Shape0.NumRows,
                                                Shape0.NumColumns,
                                                Shape1.NumColumns, "mmul");
          });
      updateShapeAndReplaceAllUsesWith(I, NewInst);
      eraseFromParentAndMove(&I, II, BB);
      eraseFromParentAndMove(TA, II, BB);
      Changed = true;
      return NewInst;
    }

    // Same as above, but with a mul, which occurs when multiplied
    // with a scalar.
    // (A * k)^t -> A^t * k
    if (match(TA, m_AnyMul(m_Value(TAMA), m_Value(TAMB))) &&
        (isSplat(TAMA) || isSplat(TAMB))) {
      IRBuilder<> LocalBuilder(&I);
      auto NewInst = distributeTransposes(
          TAMA, {R, C}, TAMB, {R, C}, Builder,
          [&](Value *T0, ShapeInfo Shape0, Value *T1, ShapeInfo Shape1) {
            bool IsFP = I.getType()->isFPOrFPVectorTy();
            auto *Mul = IsFP ? LocalBuilder.CreateFMul(T0, T1, "mmul")
                             : LocalBuilder.CreateMul(T0, T1, "mmul");
            auto *Result = cast<Instruction>(Mul);
            setShapeInfo(Result, Shape0);
            return Result;
          });
      updateShapeAndReplaceAllUsesWith(I, NewInst);
      eraseFromParentAndMove(&I, II, BB);
      eraseFromParentAndMove(TA, II, BB);
      Changed = true;
      return NewInst;
    }

    // (A + B)^t -> A^t + B^t
    if (match(TA, m_AnyAdd(m_Value(TAMA), m_Value(TAMB)))) {
      IRBuilder<> LocalBuilder(&I);
      auto NewInst = distributeTransposes(
          TAMA, {R, C}, TAMB, {R, C}, Builder,
          [&](Value *T0, ShapeInfo Shape0, Value *T1, ShapeInfo Shape1) {
            bool IsFP = I.getType()->isFPOrFPVectorTy();
            auto *Add = IsFP ? LocalBuilder.CreateFAdd(T0, T1, "madd")
                             : LocalBuilder.CreateAdd(T0, T1, "madd");
            auto *Result = cast<Instruction>(Add);
            setShapeInfo(Result, Shape0);
            return Result;
          });
      updateShapeAndReplaceAllUsesWith(I, NewInst);
      eraseFromParentAndMove(&I, II, BB);
      eraseFromParentAndMove(TA, II, BB);
      Changed = true;
      return NewInst;
    }

    return nullptr;
  }
  bool liftTranspose(Instruction &I) {
    // Erase dead instructions after lifting transposes from binops.
    auto CleanupBinOp = [this](Instruction &T, Value *A, Value *B) {
      if (T.use_empty())
        eraseFromParentAndRemoveFromShapeMap(&T);
      if (A->use_empty())
        eraseFromParentAndRemoveFromShapeMap(cast<Instruction>(A));
      if (A != B && B->use_empty())
        eraseFromParentAndRemoveFromShapeMap(cast<Instruction>(B));
    };

    Value *A, *B, *AT, *BT;
    ConstantInt *R, *K, *C;
    // A^t * B^t -> (B * A)^t
    if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>(
                      m_Value(A), m_Value(B), m_ConstantInt(R),
                      m_ConstantInt(K), m_ConstantInt(C))) &&
        match(A, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(AT))) &&
        match(B, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(BT)))) {
      IRBuilder<> IB(&I);
      MatrixBuilder Builder(IB);
      Value *M = Builder.CreateMatrixMultiply(
          BT, AT, C->getZExtValue(), K->getZExtValue(), R->getZExtValue());
      setShapeInfo(M, {C, R});
      Instruction *NewInst = Builder.CreateMatrixTranspose(
          M, C->getZExtValue(), R->getZExtValue());
      updateShapeAndReplaceAllUsesWith(I, NewInst);
      CleanupBinOp(I, A, B);
      return true;
    }
    // A^t + B^t -> (A + B)^t. Pick rows and columns from the first transpose.
    // If the shape of the second transpose is different, there's a shape
    // conflict which gets resolved by picking the shape of the first operand.
    if (match(&I, m_FAdd(m_Value(A), m_Value(B))) &&
        match(A, m_Intrinsic<Intrinsic::matrix_transpose>(
                     m_Value(AT), m_ConstantInt(R), m_ConstantInt(C))) &&
        match(B, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(BT)))) {
      IRBuilder<> Builder(&I);
      auto *Add = Builder.CreateFAdd(AT, BT, "mfadd");
      MatrixBuilder MBuilder(Builder);
      Instruction *NewInst = MBuilder.CreateMatrixTranspose(
          Add, R->getZExtValue(), C->getZExtValue(), "mfadd_t");
      updateShapeAndReplaceAllUsesWith(I, NewInst);
      assert(computeShapeInfoForInst(NewInst, ShapeMap) ==
                 ShapeMap.find(NewInst)->second &&
             "Shape of new instruction doesn't match original shape.");
      CleanupBinOp(I, A, B);
      if (auto *AddI = dyn_cast<Instruction>(Add)) {
        setShapeInfo(AddI, {R, C});
        assert(computeShapeInfoForInst(AddI, ShapeMap) ==
                   ShapeMap.find(AddI)->second &&
               "Shape of updated addition doesn't match cached shape.");
      }
      return true;
    }
    return false;
  }
  /// Try moving transposes in order to fold them away or into multiplies.
  bool optimizeTransposes() {
    bool Changed = false;
    // First sink all known transposes.
    for (BasicBlock &BB : reverse(Func)) {
      for (auto II = BB.rbegin(); II != BB.rend();) {
        Instruction &I = *II;
        // We may remove II. By default continue on the next/prev instruction.
        ++II;
        if (Instruction *NewInst = sinkTranspose(I, II, Changed))
          II = std::next(BasicBlock::reverse_iterator(NewInst));
      }
    }

    // If we have a TT matmul or a TT add, lift the transpose. We may be able
    // to fold into a consuming multiply or add.
    for (BasicBlock &BB : Func)
      for (Instruction &I : llvm::make_early_inc_range(BB))
        Changed |= liftTranspose(I);
    return Changed;
  }

  bool Visit() {
    SmallVector<Instruction *, 32> WorkList;

    // Initially only the shape of matrix intrinsics is known.
    for (BasicBlock &BB : Func)
      for (Instruction &Inst : BB) {
        IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Inst);
        if (!II)
          continue;

        switch (II->getIntrinsicID()) {
        case Intrinsic::matrix_multiply:
        case Intrinsic::matrix_transpose:
        case Intrinsic::matrix_column_major_load:
        case Intrinsic::matrix_column_major_store:
          WorkList.push_back(&Inst);
          break;
        default:
          break;
        }
      }

    if (WorkList.empty())
      return false;

    if (AM) {
      ORE = &AM->getResult<OptimizationRemarkEmitterAnalysis>(Func);
      AA = &AM->getResult<AAManager>(Func);
      DT = &AM->getResult<DominatorTreeAnalysis>(Func);
      LI = &AM->getResult<LoopAnalysis>(Func);
    }

    // Propagate shapes until nothing changes any longer.
    while (!WorkList.empty()) {
      WorkList = propagateShapeForward(WorkList);
      WorkList = propagateShapeBackward(WorkList);
    }

    bool Changed = false;
    if (!isMinimal()) {
      Changed |= optimizeTransposes();
      if (PrintAfterTransposeOpt) {
        dbgs() << "Dump after matrix transpose optimization:\n";
        Func.print(dbgs());
      }
    }
    // Collect matrix instructions in reverse-post-order, so defs are lowered
    // before uses.
    SmallVector<Instruction *, 16> MatrixInsts;
    SmallVector<CallInst *, 16> MaybeFusableInsts;
    SmallVector<IntrinsicInst *, 16> LifetimeEnds;
    ReversePostOrderTraversal<Function *> RPOT(&Func);
    for (auto *BB : RPOT)
      for (Instruction &I : *BB) {
        if (match(&I, m_Intrinsic<Intrinsic::lifetime_end>()))
          LifetimeEnds.push_back(cast<IntrinsicInst>(&I));
        if (!ShapeMap.contains(&I))
          continue;
        if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>()))
          MaybeFusableInsts.push_back(cast<CallInst>(&I));
        MatrixInsts.push_back(&I);
      }

    // First, try to lower dot products and fuse multiplies.
    SmallPtrSet<Instruction *, 16> FusedInsts;
    for (CallInst *CI : MaybeFusableInsts)
      lowerDotProduct(CI, FusedInsts, getFastMathFlags(CI));

    for (CallInst *CI : MaybeFusableInsts)
      if (!FusedInsts.contains(CI))
        LowerMatrixMultiplyFused(CI, FusedInsts, LifetimeEnds);

    Changed |= !FusedInsts.empty();

    // Lower all PHIs first, so all incoming values refer to the right vectors
    // when the remaining instructions are lowered.
    for (Instruction *Inst : MatrixInsts) {
      if (FusedInsts.count(Inst))
        continue;

      auto *PHI = dyn_cast<PHINode>(Inst);
      if (!PHI)
        continue;

      const ShapeInfo &SI = ShapeMap.at(Inst);
      auto *EltTy = cast<FixedVectorType>(PHI->getType())->getElementType();
      MatrixTy PhiM(SI.NumRows, SI.NumColumns, EltTy);

      IRBuilder<> Builder(Inst);
      for (unsigned VI = 0, VE = PhiM.getNumVectors(); VI != VE; ++VI)
        PhiM.setVector(VI, Builder.CreatePHI(PhiM.getVectorTy(),
                                             PHI->getNumIncomingValues(),
                                             PHI->getName()));
      assert(!Inst2ColumnMatrix.contains(PHI) && "map already contains phi?");
      Inst2ColumnMatrix[PHI] = PhiM;
    }

    // Then lower the remaining instructions with shape information.
    for (Instruction *Inst : MatrixInsts) {
      if (FusedInsts.count(Inst))
        continue;

      IRBuilder<> Builder(Inst);
      const ShapeInfo &SI = ShapeMap.at(Inst);

      MatrixTy Result;
      if (auto *BinOp = dyn_cast<BinaryOperator>(Inst))
        Result = VisitBinaryOperator(BinOp, SI, Builder);
      else if (auto *Cast = dyn_cast<CastInst>(Inst))
        Result = VisitCastInstruction(Cast, SI, Builder);
      else if (auto *UnOp = dyn_cast<UnaryOperator>(Inst))
        Result = VisitUnaryOperator(UnOp, SI, Builder);
      else if (auto *Intr = dyn_cast<IntrinsicInst>(Inst))
        Result = VisitIntrinsicInst(Intr, SI, Builder);
      else if (auto *Select = dyn_cast<SelectInst>(Inst))
        Result = VisitSelectInst(Select, SI, Builder);
      // ... loads, stores and PHIs are dispatched similarly.
      else
        continue;

      finalizeLowering(Inst, Result, Builder);
      Changed = true;
    }

    if (ORE) {
      RemarkGenerator RemarkGen(Inst2ColumnMatrix, *ORE, Func);
      RemarkGen.emitRemarks();
    }

    // Delete the instructions backwards, as it has a reduced likelihood of
    // having to update as many def-use and use-def chains. Poison any
    // instructions that are still used, to make sure the removal order does
    // not matter.
    SmallPtrSet<Instruction *, 16> PoisonedInsts;
    for (auto *Inst : reverse(ToRemove)) {
      for (Use &U : llvm::make_early_inc_range(Inst->uses())) {
        if (auto *Poisoned = dyn_cast<Instruction>(U.getUser()))
          PoisonedInsts.insert(Poisoned);
        U.set(PoisonValue::get(Inst->getType()));
      }
      Inst->eraseFromParent();
      PoisonedInsts.erase(Inst);
    }
    if (!PoisonedInsts.empty()) {
      // If we didn't remove all poisoned instructions, it's a hard error.
      dbgs() << "Poisoned but present instructions:\n";
      for (auto *I : PoisonedInsts)
        dbgs() << *I << "\n";
      llvm_unreachable("Poisoned but instruction not removed");
    }

    return Changed;
  }
  /// Replace intrinsic calls.
  MatrixTy VisitIntrinsicInst(IntrinsicInst *Inst, const ShapeInfo &SI,
                              IRBuilder<> &Builder) {
    switch (Inst->getIntrinsicID()) {
    case Intrinsic::matrix_multiply:
      return LowerMultiply(Inst, Builder);
    case Intrinsic::matrix_transpose:
      return LowerTranspose(Inst, Builder);
    case Intrinsic::matrix_column_major_load:
      return LowerColumnMajorLoad(Inst, Builder);
    case Intrinsic::matrix_column_major_store:
      return LowerColumnMajorStore(Inst, Builder);
    case Intrinsic::abs:
    case Intrinsic::fabs: {
      MatrixTy Result;
      MatrixTy M = getMatrix(Inst->getOperand(0), SI, Builder);
      Builder.setFastMathFlags(getFastMathFlags(Inst));

      for (auto *Vector : M.vectors()) {
        switch (Inst->getIntrinsicID()) {
        case Intrinsic::abs:
          Result.addVector(Builder.CreateBinaryIntrinsic(
              Intrinsic::abs, Vector, Inst->getOperand(1)));
          continue;
        case Intrinsic::fabs:
          Result.addVector(
              Builder.CreateUnaryIntrinsic(Inst->getIntrinsicID(), Vector));
          continue;
        default:
          llvm_unreachable("unexpected intrinsic");
        }
      }

      return Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
                                     Result.getNumVectors());
    }
    default:
      break;
    }
    llvm_unreachable(
        "only intrinsics supporting shape info should be seen here");
  }
  /// Compute the alignment for a column/row \p Idx with \p Stride between
  /// them. The address at \p Idx == 0 has alignment \p A.
  Align getAlignForIndex(unsigned Idx, Value *Stride, Type *ElementTy,
                         MaybeAlign A) const {
    Align InitialAlign = DL.getValueOrABITypeAlignment(A, ElementTy);
    if (Idx == 0)
      return InitialAlign;

    TypeSize ElementSizeInBits = DL.getTypeSizeInBits(ElementTy);
    if (auto *ConstStride = dyn_cast<ConstantInt>(Stride)) {
      uint64_t StrideInBytes =
          ConstStride->getZExtValue() * ElementSizeInBits / 8;
      return commonAlignment(InitialAlign, Idx * StrideInBytes);
    }
    return commonAlignment(InitialAlign, ElementSizeInBits / 8);
  }

  /// Return \p V as a constant of the index type used for \p Ptr.
  Value *getIndex(Value *Ptr, uint64_t V) const {
    return ConstantInt::get(DL.getIndexType(Ptr->getType()), V);
  }

  Value *castToIndexType(Value *Ptr, Value *V, IRBuilder<> &Builder) const {
    assert(V->getType()->isIntegerTy() &&
           "Attempted to cast non-integral type to integer index");
    return Builder.CreateZExtOrTrunc(V, DL.getIndexType(Ptr->getType()),
                                     V->getName() + ".cast");
  }
  /// Load a matrix with \p Shape starting at \p Ptr, using \p Stride between
  /// vectors.
  MatrixTy loadMatrix(Type *Ty, Value *Ptr, MaybeAlign MAlign, Value *Stride,
                      bool IsVolatile, ShapeInfo Shape, IRBuilder<> &Builder) {
    auto *VType = cast<FixedVectorType>(Ty);
    Type *EltTy = VType->getElementType();
    Type *VecTy = FixedVectorType::get(EltTy, Shape.getStride());
    Value *EltPtr = Ptr;
    MatrixTy Result;
    Stride = castToIndexType(Ptr, Stride, Builder);
    for (unsigned I = 0, E = Shape.getNumVectors(); I < E; ++I) {
      Value *GEP = computeVectorAddr(EltPtr, getIndex(EltPtr, I), Stride,
                                     Shape.getStride(), EltTy, Builder);
      Value *Vector = Builder.CreateAlignedLoad(
          VecTy, GEP, getAlignForIndex(I, Stride, EltTy, MAlign), IsVolatile,
          "col.load");
      Result.addVector(Vector);
    }
    return Result.addNumLoads(getNumOps(Result.getVectorTy()) *
                              Result.getNumVectors());
  }

  /// Loads a sub-matrix with shape \p ResultShape from a matrix with shape
  /// \p MatrixShape, starting at \p MatrixPtr[I][J].
  MatrixTy loadMatrix(Value *MatrixPtr, MaybeAlign Align, bool IsVolatile,
                      ShapeInfo MatrixShape, Value *I, Value *J,
                      ShapeInfo ResultShape, Type *EltTy,
                      IRBuilder<> &Builder) {
    Value *Offset = Builder.CreateAdd(
        Builder.CreateMul(J, getIndex(MatrixPtr, MatrixShape.getStride())), I);

    Value *TileStart = Builder.CreateGEP(EltTy, MatrixPtr, Offset);
    auto *TileTy = FixedVectorType::get(EltTy, ResultShape.NumRows *
                                                   ResultShape.NumColumns);

    return loadMatrix(TileTy, TileStart, Align,
                      getIndex(MatrixPtr, MatrixShape.getStride()), IsVolatile,
                      ResultShape, Builder);
  }

  /// Lower a load instruction with shape information.
  MatrixTy LowerLoad(Instruction *Inst, Value *Ptr, MaybeAlign Align,
                     Value *Stride, bool IsVolatile, ShapeInfo Shape,
                     IRBuilder<> &Builder) {
    return loadMatrix(Inst->getType(), Ptr, Align, Stride, IsVolatile, Shape,
                      Builder);
  }

  /// Lowers llvm.matrix.column.major.load.
  MatrixTy LowerColumnMajorLoad(CallInst *Inst, IRBuilder<> &Builder) {
    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
           "Intrinsic only supports column-major layout!");
    Value *Ptr = Inst->getArgOperand(0);
    Value *Stride = Inst->getArgOperand(1);
    return LowerLoad(Inst, Ptr, Inst->getParamAlign(0), Stride,
                     cast<ConstantInt>(Inst->getArgOperand(2))->isOne(),
                     {Inst->getArgOperand(3), Inst->getArgOperand(4)}, Builder);
  }
  /// Store a sub-matrix \p StoreVal into the matrix with shape \p MatrixShape,
  /// starting at \p MatrixPtr[I][J].
  void storeMatrix(const MatrixTy &StoreVal, Value *MatrixPtr,
                   MaybeAlign MAlign, bool IsVolatile, ShapeInfo MatrixShape,
                   Value *I, Value *J, Type *EltTy, IRBuilder<> &Builder) {
    Value *Offset = Builder.CreateAdd(
        Builder.CreateMul(J, getIndex(MatrixPtr, MatrixShape.getStride())), I);

    Value *TileStart = Builder.CreateGEP(EltTy, MatrixPtr, Offset);
    auto *TileTy = FixedVectorType::get(EltTy, StoreVal.getNumRows() *
                                                   StoreVal.getNumColumns());

    storeMatrix(TileTy, StoreVal, TileStart, MAlign,
                getIndex(MatrixPtr, MatrixShape.getStride()), IsVolatile,
                Builder);
  }

  /// Store matrix \p StoreVal starting at \p Ptr, using \p Stride between
  /// vectors.
  MatrixTy storeMatrix(Type *Ty, MatrixTy StoreVal, Value *Ptr,
                       MaybeAlign MAlign, Value *Stride, bool IsVolatile,
                       IRBuilder<> &Builder) {
    auto *VType = cast<FixedVectorType>(Ty);
    Value *EltPtr = Ptr;
    Stride = castToIndexType(Ptr, Stride, Builder);
    for (auto Vec : enumerate(StoreVal.vectors())) {
      Value *GEP = computeVectorAddr(EltPtr, getIndex(EltPtr, Vec.index()),
                                     Stride, StoreVal.getStride(),
                                     VType->getElementType(), Builder);
      Builder.CreateAlignedStore(Vec.value(), GEP,
                                 getAlignForIndex(Vec.index(), Stride,
                                                  VType->getElementType(),
                                                  MAlign),
                                 IsVolatile);
    }
    return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
                                   StoreVal.getNumVectors());
  }

  /// Lower a store instruction with shape information.
  MatrixTy LowerStore(Instruction *Inst, Value *Matrix, Value *Ptr,
                      MaybeAlign A, Value *Stride, bool IsVolatile,
                      ShapeInfo Shape, IRBuilder<> &Builder) {
    auto StoreVal = getMatrix(Matrix, Shape, Builder);
    return storeMatrix(Matrix->getType(), StoreVal, Ptr, A, Stride, IsVolatile,
                       Builder);
  }

  /// Lowers llvm.matrix.column.major.store.
  MatrixTy LowerColumnMajorStore(CallInst *Inst, IRBuilder<> &Builder) {
    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
           "Intrinsic only supports column-major layout!");
    Value *Matrix = Inst->getArgOperand(0);
    Value *Ptr = Inst->getArgOperand(1);
    Value *Stride = Inst->getArgOperand(2);
    return LowerStore(Inst, Matrix, Ptr, Inst->getParamAlign(1), Stride,
                      cast<ConstantInt>(Inst->getArgOperand(3))->isOne(),
                      {Inst->getArgOperand(4), Inst->getArgOperand(5)},
                      Builder);
  }
  /// Set elements I..I+BlockNumElts-1 of \p Col to \p Block.
  Value *insertVector(Value *Col, unsigned I, Value *Block,
                      IRBuilder<> &Builder) {
    // First, pad Block to the same size as Col.
    unsigned BlockNumElts =
        cast<FixedVectorType>(Block->getType())->getNumElements();
    unsigned NumElts = cast<FixedVectorType>(Col->getType())->getNumElements();
    assert(NumElts >= BlockNumElts && "Too few elements for current block");

    Block = Builder.CreateShuffleVector(
        Block, createSequentialMask(0, BlockNumElts, NumElts - BlockNumElts));

    // If Col is 7 long, I is 2 and BlockNumElts is 2, the mask is:
    //   0, 1, 7, 8, 4, 5, 6
    SmallVector<int, 16> Mask;
    unsigned i;
    for (i = 0; i < I; i++)
      Mask.push_back(i);

    unsigned VecNumElts =
        cast<FixedVectorType>(Col->getType())->getNumElements();
    for (; i < I + BlockNumElts; i++)
      Mask.push_back(i - I + VecNumElts);

    for (; i < VecNumElts; i++)
      Mask.push_back(i);

    return Builder.CreateShuffleVector(Col, Block, Mask);
  }

  Value *createMulAdd(Value *Sum, Value *A, Value *B, bool UseFPOp,
                      IRBuilder<> &Builder, bool AllowContraction,
                      unsigned &NumComputeOps) {
    NumComputeOps += getNumOps(A->getType());
    if (!Sum)
      return UseFPOp ? Builder.CreateFMul(A, B) : Builder.CreateMul(A, B);

    if (UseFPOp) {
      if (AllowContraction)
        // Use fmuladd and let the backend decide if that's profitable.
        return Builder.CreateIntrinsic(Intrinsic::fmuladd, A->getType(),
                                       {A, B, Sum});
      NumComputeOps += getNumOps(A->getType());
      return Builder.CreateFAdd(Sum, Builder.CreateFMul(A, B));
    }

    NumComputeOps += getNumOps(A->getType());
    return Builder.CreateAdd(Sum, Builder.CreateMul(A, B));
  }
  /// Cache \p Matrix as result of \p Inst and update the uses of \p Inst. For
  /// users with shape information, there's nothing to do: they will use the
  /// cached value when they are lowered. For other users, \p Matrix is
  /// flattened and the uses are updated to use it.
  void finalizeLowering(Instruction *Inst, MatrixTy Matrix,
                        IRBuilder<> &Builder) {
    auto inserted = Inst2ColumnMatrix.insert(std::make_pair(Inst, Matrix));
    (void)inserted;
    assert(inserted.second && "multiple matrix lowering mapping");

    ToRemove.push_back(Inst);
    Value *Flattened = nullptr;
    for (Use &U : llvm::make_early_inc_range(Inst->uses())) {
      if (ShapeMap.contains(U.getUser()))
        continue;

      if (!Flattened) {
        Flattened = Matrix.embedInVector(Builder);
        LLVM_DEBUG(
            if (Instruction *User = dyn_cast<Instruction>(U.getUser())) {
              dbgs() << "flattening a " << Matrix.shape() << " matrix:\n"
                     << *Inst
                     << "\nbecause we do not have a shape-aware lowering for "
                        "its user:\n"
                     << *User << '\n';
            });
        FlattenedMatrices++;
      }
      U.set(Flattened);
    }
  }
  /// Lower a dot product matmul, i.e. a (1 x N) times (N x 1) multiply, to a
  /// vector multiply followed by a reduction, when that is profitable.
  void lowerDotProduct(CallInst *MatMul,
                       SmallPtrSet<Instruction *, 16> &FusedInsts,
                       FastMathFlags FMF) {
    ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
    ShapeInfo RShape(MatMul->getArgOperand(4), MatMul->getArgOperand(5));

    if (LShape.NumRows != 1 || RShape.NumColumns != 1) // not a dot product
      return;

    Value *LHS = MatMul->getArgOperand(0);
    Value *RHS = MatMul->getArgOperand(1);
    Type *ElementType =
        cast<FixedVectorType>(LHS->getType())->getElementType();
    bool IsIntVec = ElementType->isIntegerTy();

    // Check if the operand can be used in flattened form directly.
    auto CanBeFlattened = [](Value *Op) {
      if (match(Op, m_BinOp()))
        return true;
      return match(
          Op, m_OneUse(m_CombineOr(
                  m_Load(m_Value()),
                  m_CombineOr(m_Intrinsic<Intrinsic::matrix_transpose>(),
                              m_Intrinsic<
                                  Intrinsic::matrix_column_major_load>()))));
    };
    // Returns the cost benefit of using \p Op with the dot product lowering.
    // If the returned cost is < 0, \p Op is cheaper to use in the dot product
    // lowering.
    auto GetCostForArg = [this, &CanBeFlattened](Value *Op, unsigned N) {
      if (!ShapeMap.contains(Op))
        return InstructionCost::getInvalid();

      FixedVectorType *VecTy = cast<FixedVectorType>(Op->getType());
      Type *EltTy = VecTy->getElementType();

      if (!CanBeFlattened(Op)) {
        InstructionCost EmbedCost(0);
        // Roughly estimate the cost for embedding the columns into a vector.
        for (unsigned I = 1; I < N; ++I)
          EmbedCost += TTI.getShuffleCost(TTI::SK_Splice, VecTy, VecTy, {},
                                          TTI::TCK_RecipThroughput);
        return EmbedCost;
      }

      if (match(Op, m_Intrinsic<Intrinsic::matrix_transpose>())) {
        // The transpose can be skipped for the dot product lowering; roughly
        // estimate the savings as the cost of embedding the columns.
        InstructionCost EmbedCost(0);
        for (unsigned I = 1; I < N; ++I)
          EmbedCost -= TTI.getShuffleCost(TTI::SK_Splice, VecTy, VecTy, {},
                                          TTI::TCK_RecipThroughput);
        return EmbedCost;
      }

      // Costs for loads: a single wide load replaces N narrow loads.
      if (N == 1 || match(Op, m_Load(m_Value())))
        return InstructionCost(0);

      return TTI.getMemoryOpCost(Instruction::Load, VecTy, Align(1), 0) -
             N * TTI.getMemoryOpCost(Instruction::Load, EltTy, Align(1), 0);
    };

    // Iterate over LHS and operations feeding LHS to check if it is profitable
    // to flatten the visited ops.
    SmallPtrSet<Value *, 4> Seen;
    SmallVector<Value *> WorkList;
    SmallVector<Value *> ToFlatten;
    WorkList.push_back(LHS);
    InstructionCost LHSCost(0);
    while (!WorkList.empty()) {
      Value *Op = WorkList.pop_back_val();
      if (!Seen.insert(Op).second)
        continue;

      InstructionCost OpCost = GetCostForArg(Op, LShape.NumColumns);
      // Only flatten if the cost is a net benefit.
      if (OpCost + LHSCost >= LHSCost)
        continue;

      LHSCost += OpCost;
      ToFlatten.push_back(Op);
      if (auto *I = dyn_cast<Instruction>(Op))
        WorkList.append(I->op_begin(), I->op_end());
    }

    // Compare the cost of a vector reduction against sequential adds.
    int AddOpCode = IsIntVec ? Instruction::Add : Instruction::FAdd;
    int MulOpCode = IsIntVec ? Instruction::Mul : Instruction::FMul;
    InstructionCost ReductionCost =
        TTI.getArithmeticReductionCost(
            AddOpCode, cast<VectorType>(LHS->getType()),
            IsIntVec ? std::nullopt : std::optional(FMF)) +
        TTI.getArithmeticInstrCost(MulOpCode, LHS->getType());
    InstructionCost SequentialAddCost =
        TTI.getArithmeticInstrCost(AddOpCode, ElementType) *
            (LShape.NumColumns - 1) +
        TTI.getArithmeticInstrCost(MulOpCode, ElementType) *
            (LShape.NumColumns);
    if ((LHSCost + ReductionCost - SequentialAddCost) > InstructionCost(0))
      return;

    FusedInsts.insert(MatMul);
    IRBuilder<> Builder(MatMul);
    auto FlattenArg = [&Builder, &FusedInsts, &CanBeFlattened,
                       this](Value *Op) {
      // Matmul must be the only user of loads because we don't use LowerLoad
      // for row vectors (LowerLoad results in scalar loads and shufflevectors
      // instead of a single vector load).
      if (!CanBeFlattened(Op))
        return;

      if (match(Op, m_BinOp())) {
        auto It = ShapeMap.find(Op);
        if (It != ShapeMap.end()) {
          It->second = It->second.t();
          return;
        }
      }

      FusedInsts.insert(cast<Instruction>(Op));
      // If the operand is a builtin load, lower to a regular vector load.
      Value *Arg;
      if (match(Op, m_Intrinsic<Intrinsic::matrix_column_major_load>(
                        m_Value(Arg)))) {
        auto *NewLoad = Builder.CreateLoad(Op->getType(), Arg);
        Op->replaceAllUsesWith(NewLoad);
        eraseFromParentAndRemoveFromShapeMap(cast<Instruction>(Op));
        return;
      }
      if (match(Op, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(Arg)))) {
        ToRemove.push_back(cast<Instruction>(Op));
        Op->replaceAllUsesWith(Arg);
      }
    };

    for (auto *V : ToFlatten)
      FlattenArg(V);

    LHS = MatMul->getArgOperand(0);

    // Insert mul/fmul and llvm.vector.reduce.fadd.
    Value *Mul =
        IsIntVec ? Builder.CreateMul(LHS, RHS) : Builder.CreateFMul(LHS, RHS);

    Value *Result;
    if (IsIntVec)
      Result = Builder.CreateAddReduce(Mul);
    else {
      Result = Builder.CreateFAddReduce(
          ConstantFP::get(
              cast<FixedVectorType>(LHS->getType())->getElementType(), 0.0),
          Mul);
      cast<Instruction>(Result)->setFastMathFlags(FMF);
    }

    // Pack the scalar back into a matrix and replace the matmul inst.
    Result = Builder.CreateInsertElement(PoisonValue::get(MatMul->getType()),
                                         Result, uint64_t(0));
    MatMul->replaceAllUsesWith(Result);
    FusedInsts.insert(MatMul);
    ToRemove.push_back(MatMul);
  }
  /// Cap \p BlockSize so the emitted remainder vectors stay legal for the
  /// target, splitting further if needed.
  unsigned capBlockSize(unsigned BlockSize, unsigned Remainder, Type *EltType) {
    // ...
    auto *VecTy = FixedVectorType::get(EltType, BlockSize);
    if (TTI.isTypeLegal(VecTy))
      return BlockSize;
    // ...
  }
  /// Compute \p Result += \p A * \p B for input matrices with left-associating
  /// addition.
  void emitMatrixMultiply(MatrixTy &Result, const MatrixTy &A,
                          const MatrixTy &B, IRBuilder<> &Builder, bool IsTiled,
                          bool IsScalarMatrixTransposed, FastMathFlags FMF) {
    const unsigned VF = std::max<unsigned>(
        TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
                .getFixedValue() /
            Result.getElementType()->getPrimitiveSizeInBits().getFixedValue(),
        1U);
    unsigned R = Result.getNumRows();
    unsigned C = Result.getNumColumns();
    unsigned M = A.getNumColumns();

    bool IsFP = Result.getElementType()->isFloatingPointTy();
    assert(A.isColumnMajor() == B.isColumnMajor() &&
           Result.isColumnMajor() == A.isColumnMajor() &&
           "operands must agree on matrix layout");
    unsigned NumComputeOps = 0;

    Builder.setFastMathFlags(FMF);

    if (A.isColumnMajor()) {
      // Multiply columns from the first operand with scalars from the second
      // operand. Then move along the K axes and accumulate the columns. With
      // this the adds can be vectorized without reassociation.
      for (unsigned J = 0; J < C; ++J) {
        unsigned BlockSize = VF;
        // If Result is zero, we don't need to accumulate in the K == 0
        // iteration.
        bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));

        for (unsigned I = 0; I < R; I += BlockSize) {
          // Gradually lower the vectorization factor to cover the remainder.
          BlockSize = capBlockSize(BlockSize, R - I, Result.getElementType());

          Value *Sum = IsTiled ? Result.extractVector(I, J, BlockSize, Builder)
                               : nullptr;
          for (unsigned K = 0; K < M; ++K) {
            Value *L = A.extractVector(I, K, BlockSize, Builder);
            Value *RH = Builder.CreateExtractElement(
                B.getColumn(IsScalarMatrixTransposed ? K : J),
                IsScalarMatrixTransposed ? J : K);
            Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
            Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
                               IsFP, Builder, FMF.allowContract(),
                               NumComputeOps);
          }
          Result.setVector(J,
                           insertVector(Result.getVector(J), I, Sum, Builder));
        }
      }
    } else {
      // Multiply rows from the second operand with scalars from the first
      // operand. Then move along the K axes and accumulate the rows.
      for (unsigned I = 0; I < R; ++I) {
        unsigned BlockSize = VF;
        bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
        for (unsigned J = 0; J < C; J += BlockSize) {
          BlockSize = capBlockSize(BlockSize, C - J, Result.getElementType());

          Value *Sum = nullptr;
          for (unsigned K = 0; K < M; ++K) {
            Value *RV = B.extractVector(K, J, BlockSize, Builder);
            Value *LH = Builder.CreateExtractElement(
                A.getVector(IsScalarMatrixTransposed ? K : I),
                IsScalarMatrixTransposed ? I : K);
            Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat");
            Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, RV,
                               IsFP, Builder, FMF.allowContract(),
                               NumComputeOps);
          }
          Result.setVector(I,
                           insertVector(Result.getVector(I), J, Sum, Builder));
        }
      }
    }
    Result.addNumComputeOps(NumComputeOps);
  }
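  // In matrix terms, one (J, K) step of the column-major loop above computes
  //   Result(:, J) += A(:, K) * B(K, J)
  // i.e. a column of A scaled by a single scalar of B and accumulated into a
  // result column, VF elements at a time, so the adds vectorize without
  // needing reassociation.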
  /// Ensure that the memory in \p Load does not alias \p Store by potentially
  /// copying it to a new location. The new or original location is returned.
  Value *getNonAliasingPointer(LoadInst *Load, StoreInst *Store,
                               CallInst *MatMul) {
    MemoryLocation StoreLoc = MemoryLocation::get(Store);
    MemoryLocation LoadLoc = MemoryLocation::get(Load);

    // If we can statically determine noalias we're good.
    if (AA->isNoAlias(LoadLoc, StoreLoc))
      return Load->getPointerOperand();

    // Create code to check if the memory locations of the Load and Store
    // overlap and if they do, copy Load's operand to a new buffer.

    // First, create new blocks for the runtime check and the copy.
    BasicBlock *Check0 = MatMul->getParent();
    SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
    for (BasicBlock *Succ : successors(Check0))
      DTUpdates.push_back({DT->Delete, Check0, Succ});

    BasicBlock *Check1 = SplitBlock(MatMul->getParent(), MatMul, nullptr, LI,
                                    nullptr, "alias_cont");
    BasicBlock *Copy =
        SplitBlock(MatMul->getParent(), MatMul, nullptr, LI, nullptr, "copy");
    BasicBlock *Fusion = SplitBlock(MatMul->getParent(), MatMul, nullptr, LI,
                                    nullptr, "no_alias");

    // Check if the loaded memory location begins before the end of the store
    // location. If so, they might overlap; otherwise they are guaranteed not
    // to.
    IRBuilder<> Builder(MatMul);
    Check0->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(Check0);
    Type *AddrTy = DL.getAddressType(Store->getPointerOperand()->getType());
    Value *StoreBegin = Store->getPointerOperand();
    Value *StoreEnd = Builder.CreatePtrAdd(
        StoreBegin, ConstantInt::get(AddrTy, StoreLoc.Size.getValue()),
        "store.end");
    Value *LoadBegin = Load->getPointerOperand();
    Builder.CreateCondBr(Builder.CreateICmpULT(LoadBegin, StoreEnd), Check1,
                         Fusion);

    // Check if the store begins before the end of the load location.
    Check1->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(Check1, Check1->begin());
    Value *LoadEnd = Builder.CreatePtrAdd(
        LoadBegin, ConstantInt::get(AddrTy, LoadLoc.Size.getValue()),
        "load.end");
    Builder.CreateCondBr(Builder.CreateICmpULT(StoreBegin, LoadEnd), Copy,
                         Fusion);

    // Copy the load operand to a new alloca. Use an array type for the
    // alloca, to avoid potentially huge alignment requirements for large
    // vector types.
    Builder.SetInsertPoint(Copy, Copy->begin());
    auto *VT = cast<FixedVectorType>(Load->getType());
    auto *ArrayTy = ArrayType::get(VT->getElementType(), VT->getNumElements());
    AllocaInst *Alloca =
        Builder.CreateAlloca(ArrayTy, Load->getPointerAddressSpace());
    Builder.CreateMemCpy(Alloca, Alloca->getAlign(), Load->getPointerOperand(),
                         Load->getAlign(), LoadLoc.Size.getValue());

    Builder.SetInsertPoint(Fusion, Fusion->begin());
    PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
    PHI->addIncoming(Load->getPointerOperand(), Check0);
    PHI->addIncoming(Load->getPointerOperand(), Check1);
    PHI->addIncoming(Alloca, Copy);

    // Adjust the dominator tree.
    DTUpdates.push_back({DT->Insert, Check0, Check1});
    DTUpdates.push_back({DT->Insert, Check0, Fusion});
    DTUpdates.push_back({DT->Insert, Check1, Copy});
    DTUpdates.push_back({DT->Insert, Check1, Fusion});
    DT->applyUpdates(DTUpdates);
    return PHI;
  }
  bool isFusionProfitable(CallInst *MatMul) {
    if (ForceFusion)
      return true;

    ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
    ShapeInfo RShape(MatMul->getArgOperand(4), MatMul->getArgOperand(5));

    const unsigned R = LShape.NumRows;
    const unsigned C = RShape.NumColumns;
    const unsigned M = LShape.NumColumns;
    auto *EltType = cast<FixedVectorType>(MatMul->getType())->getElementType();

    const unsigned VF = std::max<unsigned>(
        TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
                .getFixedValue() /
            EltType->getPrimitiveSizeInBits().getFixedValue(),
        1U);

    // Cost model for tiling: for tiling to be beneficial, we need reuse
    // either along the R or the C axis. A single matrix-vector multiply won't
    // benefit.
    if (R <= VF && C == 1)
      return false;
    // We also need enough elements to exceed the number of vector registers
    // we have, otherwise everything fits in registers anyway.
    unsigned Op0Regs = (R + VF - 1) / VF * M;
    unsigned Op1Regs = (M + VF - 1) / VF * C;
    return Op0Regs + Op1Regs >
           TTI.getNumberOfRegisters(TTI.getRegisterClassForType(true));
  }
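  // Worked example for the register estimate above (illustrative target:
  // VF = 2, 16 vector registers): for R = C = M = 4,
  //   Op0Regs = (4 + 2 - 1) / 2 * 4 = 8 and Op1Regs = 8,
  // so 8 + 8 > 16 is false and tiling is considered unprofitable; with
  // M = 8 both operand estimates double and fusion becomes profitable.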
  MatrixTy getZeroMatrix(Type *EltType, unsigned R, unsigned C) {
    MatrixTy Res;
    auto *ColumType = FixedVectorType::get(EltType, R);
    for (unsigned I = 0; I < C; ++I)
      Res.addVector(ConstantAggregateZero::get(ColumType));
    return Res;
  }
  void createTiledLoops(CallInst *MatMul, Value *LPtr, ShapeInfo LShape,
                        Value *RPtr, ShapeInfo RShape, StoreInst *Store) {
    auto *EltType = cast<FixedVectorType>(MatMul->getType())->getElementType();

    // Create the main tiling loop nest.
    TileInfo TI(LShape.NumRows, RShape.NumColumns, LShape.NumColumns, TileSize);
    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
    Instruction *InsertI = cast<Instruction>(MatMul);
    BasicBlock *Start = InsertI->getParent();
    BasicBlock *End =
        SplitBlock(InsertI->getParent(), InsertI, DT, LI, nullptr, "continue");
    IRBuilder<> Builder(MatMul);
    BasicBlock *InnerBody = TI.CreateTiledLoops(Start, End, Builder, DTU, *LI);

    Type *TileVecTy =
        FixedVectorType::get(MatMul->getType()->getScalarType(), TileSize);
    MatrixTy TileResult;
    // Insert in the inner loop header.
    Builder.SetInsertPoint(TI.KLoop.Header->getTerminator());
    // Create PHI nodes for the result columns to accumulate across iterations.
    SmallVector<PHINode *, 4> ColumnPhis;
    for (unsigned I = 0; I < TileSize; I++) {
      auto *Phi = Builder.CreatePHI(TileVecTy, 2, "result.vec." + Twine(I));
      Phi->addIncoming(ConstantAggregateZero::get(TileVecTy),
                       TI.RowLoop.Header->getSingleSuccessor());
      TileResult.addVector(Phi);
      ColumnPhis.push_back(Phi);
    }

    // Insert in the inner loop body, which computes
    //   Res += Load(CurrentRow, K) * Load(K, CurrentColumn)
    Builder.SetInsertPoint(InnerBody->getTerminator());
    // Load tiles of the operands.
    MatrixTy A =
        loadMatrix(LPtr, {}, false, LShape, TI.RowLoop.Index, TI.KLoop.Index,
                   {TileSize, TileSize}, EltType, Builder);
    MatrixTy B =
        loadMatrix(RPtr, {}, false, RShape, TI.KLoop.Index,
                   TI.ColumnLoop.Index, {TileSize, TileSize}, EltType, Builder);
    emitMatrixMultiply(TileResult, A, B, Builder, true, false,
                       getFastMathFlags(MatMul));
    // Store result after the inner loop is done.
    Builder.SetInsertPoint(TI.RowLoop.Latch->getTerminator());
    storeMatrix(TileResult, Store->getPointerOperand(), Store->getAlign(),
                Store->isVolatile(), {LShape.NumRows, RShape.NumColumns},
                TI.RowLoop.Index, TI.ColumnLoop.Index, EltType, Builder);

    for (unsigned I = 0; I < TileResult.getNumVectors(); I++)
      ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.KLoop.Latch);

    // Force unrolling of a few iterations of the inner loop, to make sure
    // there is enough work per iteration.
    // FIXME: The unroller should make this decision.
    unsigned InnerLoopUnrollCount = std::min(10u, LShape.NumColumns / TileSize);
    addStringMetadataToLoop(LI->getLoopFor(TI.KLoop.Header),
                            "llvm.loop.unroll.count", InnerLoopUnrollCount);
  }
  void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1,
                      StoreInst *Store,
                      SmallPtrSetImpl<Instruction *> &FusedInsts) {
    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
           "Tiling only supported for column-major matrixes at the moment!");
    if (!isFusionProfitable(MatMul))
      return;

    ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
    ShapeInfo RShape(MatMul->getArgOperand(4), MatMul->getArgOperand(5));

    const unsigned R = LShape.NumRows;
    const unsigned C = RShape.NumColumns;
    const unsigned M = LShape.NumColumns;
    auto *EltType = cast<FixedVectorType>(MatMul->getType())->getElementType();

    Value *APtr = getNonAliasingPointer(LoadOp0, Store, MatMul);
    Value *BPtr = getNonAliasingPointer(LoadOp1, Store, MatMul);
    Value *CPtr = Store->getPointerOperand();

    unsigned NumOps = getNumNativeVectorOps(EltType, R, M, C);
    if (NumOps > TileLoopsThreshold)
      createTiledLoops(MatMul, APtr, LShape, BPtr, RShape, Store);
    else {
      IRBuilder<> Builder(Store);
      for (unsigned J = 0; J < C; J += TileSize)
        for (unsigned I = 0; I < R; I += TileSize) {
          const unsigned TileR = std::min(R - I, unsigned(TileSize));
          const unsigned TileC = std::min(C - J, unsigned(TileSize));
          MatrixTy Res = getZeroMatrix(EltType, TileR, TileC);

          for (unsigned K = 0; K < M; K += TileSize) {
            const unsigned TileM = std::min(M - K, unsigned(TileSize));
            MatrixTy A =
                loadMatrix(APtr, LoadOp0->getAlign(), LoadOp0->isVolatile(),
                           LShape, getIndex(APtr, I), getIndex(APtr, K),
                           {TileR, TileM}, EltType, Builder);
            MatrixTy B =
                loadMatrix(BPtr, LoadOp1->getAlign(), LoadOp1->isVolatile(),
                           RShape, getIndex(BPtr, K), getIndex(BPtr, J),
                           {TileM, TileC}, EltType, Builder);
            emitMatrixMultiply(Res, A, B, Builder, true, false,
                               getFastMathFlags(MatMul));
          }
          storeMatrix(Res, CPtr, Store->getAlign(), Store->isVolatile(),
                      {R, M}, getIndex(CPtr, I), getIndex(CPtr, J), EltType,
                      Builder);
        }
    }

    // Mark eliminated instructions as fused and remove them.
    FusedInsts.insert(Store);
    FusedInsts.insert(MatMul);
    eraseFromParentAndRemoveFromShapeMap(Store);
    eraseFromParentAndRemoveFromShapeMap(MatMul);
    if (LoadOp0->use_empty()) {
      FusedInsts.insert(LoadOp0);
      eraseFromParentAndRemoveFromShapeMap(LoadOp0);
    }
    if (LoadOp1 != LoadOp0 && LoadOp1->use_empty()) {
      FusedInsts.insert(LoadOp1);
      eraseFromParentAndRemoveFromShapeMap(LoadOp1);
    }
  }
  /// Try to lower matrix multiply chains by fusing operations. Call
  /// finalizeLowering on lowered instructions.
  void
  LowerMatrixMultiplyFused(CallInst *MatMul,
                           SmallPtrSetImpl<Instruction *> &FusedInsts,
                           SmallVectorImpl<IntrinsicInst *> &LifetimeEnds) {
    if (!FuseMatrix || !DT)
      return;

    assert(AA && LI && "Analyses should be available");

    Value *A = MatMul->getArgOperand(0);
    Value *B = MatMul->getArgOperand(1);

    // We can fold the transpose into the operand that is used to extract
    // scalars.
    Value *T;
    if (MatrixLayout == MatrixLayoutTy::ColumnMajor
            ? match(B, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(T)))
            : match(A, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(T)))) {
      IRBuilder<> Builder(MatMul);
      auto *EltType =
          cast<FixedVectorType>(MatMul->getType())->getElementType();
      ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
      ShapeInfo RShape(MatMul->getArgOperand(4), MatMul->getArgOperand(5));
      const unsigned R = LShape.NumRows;
      const unsigned M = LShape.NumColumns;
      const unsigned C = RShape.NumColumns;

      MatrixTy MA;
      MatrixTy MB;

      Value *Transpose;
      if (MatrixLayout == MatrixLayoutTy::ColumnMajor) {
        MA = getMatrix(A, ShapeInfo(R, M), Builder);
        MB = getMatrix(T, ShapeInfo(C, M), Builder);
        Transpose = B;
      } else {
        MA = getMatrix(T, ShapeInfo(R, M), Builder);
        MB = getMatrix(B, ShapeInfo(C, M), Builder);
        Transpose = A;
      }

      // Initialize the output.
      MatrixTy Result(R, C, EltType);

      emitMatrixMultiply(Result, MA, MB, Builder, false, true,
                         getFastMathFlags(MatMul));

      FusedInsts.insert(MatMul);
      if (Transpose->hasOneUse()) {
        FusedInsts.insert(cast<Instruction>(Transpose));
        ToRemove.push_back(cast<Instruction>(Transpose));
        // TODO: add a fake entry for the folded instruction so that this is
        // included in the expression in the remark.
        Inst2ColumnMatrix[Transpose] = MatrixTy(M, C, EltType);
      }
      finalizeLowering(MatMul, Result, Builder);
      return;
    }

    if (!MatMul->hasOneUse() || MatrixLayout != MatrixLayoutTy::ColumnMajor)
      return;

    // Lower {ld, ld} -> matmul -> st chains. No need to call finalizeLowering
    // since the single store user will be lowered as part of this.
    auto *LoadOp0 = dyn_cast<LoadInst>(A);
    auto *LoadOp1 = dyn_cast<LoadInst>(B);
    auto *Store = dyn_cast<StoreInst>(*MatMul->user_begin());
    if (LoadOp0 && LoadOp1 && Store) {
      // The store address must dominate the MatMul instruction, otherwise
      // we create invalid IR.
      SetVector<Value *> WorkList;
      WorkList.insert(Store->getOperand(1));
      SmallVector<Instruction *> ToHoist;
      for (unsigned I = 0; I != WorkList.size(); ++I) {
        Value *Current = WorkList[I];
        auto *CurrI = dyn_cast<Instruction>(Current);
        if (!CurrI)
          continue;
        if (isa<PHINode>(CurrI))
          return;
        if (DT->dominates(CurrI, MatMul))
          continue;
        if (CurrI->mayHaveSideEffects() || CurrI->mayReadFromMemory())
          return;
        ToHoist.push_back(CurrI);
        WorkList.insert_range(CurrI->operands());
      }

      sort(ToHoist, [this](Instruction *A, Instruction *B) {
        return DT->dominates(A, B);
      });
      for (Instruction *I : ToHoist)
        I->moveBefore(MatMul->getIterator());

      // Deal with lifetime.end calls that might be between Load0/Load1 and
      // the store. To avoid introducing loads to dead objects (i.e. after the
      // lifetime has been terminated), either sink them after the store (when
      // the lifetime end is in the same block), or skip the fusion.
      BasicBlock *StoreParent = Store->getParent();
      bool FusableOpsInSameBlock = LoadOp0->getParent() == StoreParent &&
                                   LoadOp1->getParent() == StoreParent;
      for (unsigned Idx = 0; Idx != LifetimeEnds.size();) {
        IntrinsicInst *End = LifetimeEnds[Idx];
        auto Inc = make_scope_exit([&Idx]() { Idx++; });
        // If the lifetime.end is guaranteed to be before the loads or after
        // the store, it won't interfere with fusion.
        if (DT->dominates(End, LoadOp0) && DT->dominates(End, LoadOp1))
          continue;
        if (DT->dominates(Store, End))
          continue;
        // If all fusable ops are in the same block and the lifetime.end is in
        // a different block, it won't interfere with fusion.
        if (FusableOpsInSameBlock && End->getParent() != StoreParent)
          continue;

        // If the loads don't alias the lifetime.end, it won't interfere with
        // fusion.
        MemoryLocation EndLoc = MemoryLocation::getForArgument(End, 1, nullptr);
        if (!EndLoc.Ptr)
          continue;
        MemoryLocation Load0Loc = MemoryLocation::get(LoadOp0);
        MemoryLocation Load1Loc = MemoryLocation::get(LoadOp1);
        if (AA->isNoAlias(Load0Loc, EndLoc) && AA->isNoAlias(Load1Loc, EndLoc))
          continue;

        // If both lifetime.end and the store are in the same block, extend
        // the lifetime until after the store, so the new lifetime covers the
        // loads we introduce later.
        if (End->getParent() == StoreParent) {
          End->moveAfter(Store);
          continue;
        }

        // Otherwise, conservatively remove the lifetime marker.
        ToRemove.push_back(End);
        std::swap(LifetimeEnds[Idx], LifetimeEnds.back());
        LifetimeEnds.pop_back();
        Inc.release();
      }

      emitSIMDTiling(MatMul, LoadOp0, LoadOp1, Store, FusedInsts);
      return;
    }
  }
  /// Lowers llvm.matrix.multiply.
  MatrixTy LowerMultiply(CallInst *MatMul, IRBuilder<> &Builder) {
    auto *EltType = cast<FixedVectorType>(MatMul->getType())->getElementType();
    ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
    ShapeInfo RShape(MatMul->getArgOperand(4), MatMul->getArgOperand(5));

    const MatrixTy &Lhs = getMatrix(MatMul->getArgOperand(0), LShape, Builder);
    const MatrixTy &Rhs = getMatrix(MatMul->getArgOperand(1), RShape, Builder);
    assert(Lhs.getElementType() == Rhs.getElementType() &&
           "Matrix multiply argument element types do not match.");

    const unsigned R = LShape.NumRows;
    const unsigned C = RShape.NumColumns;
    assert(LShape.NumColumns == RShape.NumRows);

    // Initialize the output.
    MatrixTy Result(R, C, EltType);
    assert(Lhs.getElementType() == Result.getElementType() &&
           "Matrix multiply result element type does not match arguments.");

    emitMatrixMultiply(Result, Lhs, Rhs, Builder, false, false,
                       getFastMathFlags(MatMul));
    return Result;
  }
  /// Lowers llvm.matrix.transpose.
  MatrixTy LowerTranspose(CallInst *Inst, IRBuilder<> &Builder) {
    MatrixTy Result;
    Value *InputVal = Inst->getArgOperand(0);
    FixedVectorType *VectorTy = cast<FixedVectorType>(InputVal->getType());
    ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2));
    MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder);

    const unsigned NewNumVecs =
        InputMatrix.isColumnMajor() ? ArgShape.NumRows : ArgShape.NumColumns;
    const unsigned NewNumElts =
        InputMatrix.isColumnMajor() ? ArgShape.NumColumns : ArgShape.NumRows;

    for (unsigned I = 0; I < NewNumVecs; ++I) {
      // Build a single result vector. First initialize it.
      Value *ResultVector = PoisonValue::get(
          FixedVectorType::get(VectorTy->getElementType(), NewNumElts));
      // Go through the old elements and insert them into the resulting
      // vector.
      for (auto J : enumerate(InputMatrix.vectors())) {
        Value *Elt = Builder.CreateExtractElement(J.value(), I);
        // Row and column indices are transposed.
        ResultVector =
            Builder.CreateInsertElement(ResultVector, Elt, J.index());
      }
      Result.addVector(ResultVector);
    }

    // TODO: Improve estimate of operations needed for transposes. Currently
    // we assume 2 ops per element (insert/extract).
    return Result.addNumComputeOps(2 * ArgShape.NumRows * ArgShape.NumColumns)
        .addNumExposedTransposes(1);
  }
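  // For a column-major 2x3 input, the loops above move the element at
  // (row I, column J) to (row J, column I) of the 3x2 result using one
  // extractelement/insertelement pair per element, matching the compute-op
  // estimate of 2 * 2 * 3 = 12 for this shape.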
  /// Lower load instructions.
  MatrixTy VisitLoad(LoadInst *Inst, const ShapeInfo &SI, Value *Ptr,
                     IRBuilder<> &Builder) {
    return LowerLoad(Inst, Ptr, Inst->getAlign(),
                     getIndex(Ptr, SI.getStride()), Inst->isVolatile(), SI,
                     Builder);
  }

  MatrixTy VisitStore(StoreInst *Inst, const ShapeInfo &SI, Value *StoredVal,
                      Value *Ptr, IRBuilder<> &Builder) {
    return LowerStore(Inst, StoredVal, Ptr, Inst->getAlign(),
                      getIndex(Ptr, SI.getStride()), Inst->isVolatile(), SI,
                      Builder);
  }

  MatrixTy VisitPHI(PHINode *Inst, const ShapeInfo &SI, IRBuilder<> &Builder) {
    auto BlockIP = Inst->getParent()->getFirstInsertionPt();
    Builder.SetInsertPoint(BlockIP);
    MatrixTy PhiM = getMatrix(Inst, SI, Builder);

    for (auto [IncomingV, IncomingB] :
         llvm::zip_equal(Inst->incoming_values(), Inst->blocks())) {
      // Set the insertion point to the incoming value's definition, as
      // materializing the matrix may generate new instructions.
      if (auto *IncomingInst = dyn_cast<Instruction>(IncomingV)) {
        if (auto MaybeIP = IncomingInst->getInsertionPointAfterDef())
          Builder.SetInsertPoint(*MaybeIP);
        else
          Builder.SetInsertPoint(BlockIP);
      } else {
        Builder.SetInsertPoint(IncomingB->getTerminator());
      }

      MatrixTy OpM = getMatrix(IncomingV, SI, Builder);

      for (unsigned VI = 0, VE = PhiM.getNumVectors(); VI != VE; ++VI) {
        PHINode *NewPHI = cast<PHINode>(PhiM.getVector(VI));
        NewPHI->addIncoming(OpM.getVector(VI), IncomingB);
      }
    }

    return PhiM;
  }
  /// Lower binary operators. Requires the shapes of both operands to match.
  MatrixTy VisitBinaryOperator(BinaryOperator *Inst, const ShapeInfo &SI,
                               IRBuilder<> &Builder) {
    Value *Lhs = Inst->getOperand(0);
    Value *Rhs = Inst->getOperand(1);

    MatrixTy Result;
    MatrixTy A = getMatrix(Lhs, SI, Builder);
    MatrixTy B = getMatrix(Rhs, SI, Builder);
    assert(A.isColumnMajor() == B.isColumnMajor() &&
           Result.isColumnMajor() == A.isColumnMajor() &&
           "operands must agree on matrix layout");
    Builder.setFastMathFlags(getFastMathFlags(Inst));

    for (auto [AV, BV] : llvm::zip_equal(A.vectors(), B.vectors()))
      Result.addVector(Builder.CreateBinOp(Inst->getOpcode(), AV, BV));

    return Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
                                   Result.getNumVectors());
  }
  /// Lower unary operators.
  MatrixTy VisitUnaryOperator(UnaryOperator *Inst, const ShapeInfo &SI,
                              IRBuilder<> &Builder) {
    Value *Op = Inst->getOperand(0);

    MatrixTy Result;
    MatrixTy M = getMatrix(Op, SI, Builder);

    Builder.setFastMathFlags(getFastMathFlags(Inst));

    // Helper to perform the unary operation on a vector.
    auto BuildVectorOp = [&Builder, Inst](Value *Op) {
      switch (Inst->getOpcode()) {
      case Instruction::FNeg:
        return Builder.CreateFNeg(Op);
      default:
        llvm_unreachable("Unsupported unary operation");
      }
    };

    for (auto *Vector : M.vectors())
      Result.addVector(BuildVectorOp(Vector));

    return Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
                                   Result.getNumVectors());
  }
  /// Lower cast instructions.
  MatrixTy VisitCastInstruction(CastInst *Inst, const ShapeInfo &Shape,
                                IRBuilder<> &Builder) {
    Value *Op = Inst->getOperand(0);

    MatrixTy Result;
    MatrixTy M = getMatrix(Op, Shape, Builder);

    Builder.setFastMathFlags(getFastMathFlags(Inst));

    auto *OrigVTy = cast<VectorType>(Inst->getType());
    auto *NewVTy = VectorType::get(OrigVTy->getElementType(),
                                   ElementCount::getFixed(M.getStride()));

    for (auto *Vector : M.vectors())
      Result.addVector(Builder.CreateCast(Inst->getOpcode(), Vector, NewVTy));

    return Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
                                   Result.getNumVectors());
  }
  /// Lower selects.
  MatrixTy VisitSelectInst(SelectInst *Inst, const ShapeInfo &Shape,
                           IRBuilder<> &Builder) {
    Value *Cond = Inst->getOperand(0);
    Value *OpA = Inst->getOperand(1);
    Value *OpB = Inst->getOperand(2);

    MatrixTy Result;
    MatrixTy A = getMatrix(OpA, Shape, Builder);
    MatrixTy B = getMatrix(OpB, Shape, Builder);

    SmallVector<Value *> CondV;
    if (isa<FixedVectorType>(Cond->getType())) {
      MatrixTy C = getMatrix(Cond, Shape, Builder);
      llvm::copy(C.vectors(), std::back_inserter(CondV));
    } else {
      CondV.resize(A.getNumVectors());
      llvm::fill(CondV, Cond);
    }

    // Note: if we have a vector conditional, we should be propagating
    // profile information.
    for (auto [CV, AV, BV] : llvm::zip_equal(CondV, A.vectors(), B.vectors()))
      Result.addVector(Builder.CreateSelect(CV, AV, BV));

    return Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
                                   Result.getNumVectors());
  }
  /// Helper to linearize a matrix expression tree into a string. Currently
  /// matrix expressions are linearized by starting at an expression leaf and
  /// linearizing bottom up.
  struct ExprLinearizer {
    unsigned LengthToBreak = 100;
    std::string Str;
    raw_string_ostream Stream;
    unsigned LineLength = 0;
    const DataLayout &DL;

    /// Mapping from instructions to matrixes. It is used to identify
    /// matrix instructions.
    const MapVector<Value *, MatrixTy> &Inst2Matrix;

    /// Mapping from values to the leaves of all expressions that the value is
    /// part of.
    const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared;

    /// Set of matrix expressions in the scope of a given DISubprogram.
    const SmallSetVector<Value *, 32> &ExprsInSubprogram;

    /// Leaf node of the expression to linearize.
    Value *Leaf;

    /// Used to keep track of sub-expressions that get reused while
    /// linearizing the expression. Re-used sub-expressions are marked as
    /// (reused).
    SmallPtrSet<Value *, 8> ReusedExprs;

    ExprLinearizer(const DataLayout &DL,
                   const MapVector<Value *, MatrixTy> &Inst2Matrix,
                   const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
                   const SmallSetVector<Value *, 32> &ExprsInSubprogram,
                   Value *Leaf)
        : Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
          ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
    void indent(unsigned N) {
      LineLength += N;
      for (unsigned i = 0; i < N; i++)
        Stream << " ";
    }

    void lineBreak() {
      Stream << "\n";
      LineLength = 0;
    }

    void maybeIndent(unsigned Indent) {
      if (LineLength >= LengthToBreak)
        lineBreak();

      if (LineLength == 0)
        indent(Indent);
    }

    void write(StringRef S) {
      LineLength += S.size();
      Stream << S;
    }

    Value *getUnderlyingObjectThroughLoads(Value *V) {
      if (Value *Ptr = getPointerOperand(V))
        return getUnderlyingObjectThroughLoads(Ptr);
      else if (V->getType()->isPointerTy())
        return getUnderlyingObject(V);
      return V;
    }

    /// Returns true if \p V is a matrix value in the given subprogram.
    bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); }
    /// If \p V is a matrix value, print its shape as NumRows x NumColumns to
    /// \p SS.
    void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
      auto M = Inst2Matrix.find(V);
      if (M == Inst2Matrix.end())
        SS << "unknown";
      else {
        SS << M->second.getNumRows();
        SS << "x";
        SS << M->second.getNumColumns();
      }
    }

    /// Write the called function name. Handles calls to llvm.matrix.*
    /// specially: we write the name, followed by the dimensions of the input
    /// matrixes, followed by the scalar type name.
    void writeFnName(CallInst *CI) {
      if (!CI->getCalledFunction())
        write("<no called fn>");
      else {
        StringRef Name = CI->getCalledFunction()->getName();
        if (!Name.starts_with("llvm.matrix")) {
          write(Name);
          return;
        }
        auto *II = cast<IntrinsicInst>(CI);
        write(Intrinsic::getBaseName(II->getIntrinsicID())
                  .drop_front(StringRef("llvm.").size()));
        write(".");
        std::string Tmp;
        raw_string_ostream SS(Tmp);

        switch (II->getIntrinsicID()) {
        case Intrinsic::matrix_multiply:
          prettyPrintMatrixType(II->getOperand(0), SS);
          SS << ".";
          prettyPrintMatrixType(II->getOperand(1), SS);
          SS << "." << *II->getType()->getScalarType();
          break;
        case Intrinsic::matrix_transpose:
          prettyPrintMatrixType(II->getOperand(0), SS);
          SS << "." << *II->getType()->getScalarType();
          break;
        case Intrinsic::matrix_column_major_load:
          prettyPrintMatrixType(II, SS);
          SS << "." << *II->getType()->getScalarType();
          break;
        case Intrinsic::matrix_column_major_store:
          prettyPrintMatrixType(II->getOperand(0), SS);
          SS << "." << *II->getOperand(0)->getType()->getScalarType();
          break;
        default:
          llvm_unreachable("Unhandled case");
        }
        write(Tmp);
      }
    }
    unsigned getNumShapeArgs(CallInst *CI) const {
      if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::matrix_multiply:
          return 3;
        case Intrinsic::matrix_transpose:
          return 2;
        case Intrinsic::matrix_column_major_load:
        case Intrinsic::matrix_column_major_store:
          return 3;
        default:
          return 0;
        }
      }
      return 0;
    }
    /// Special printing for values: for pointers, we print whether they refer
    /// to an (function) external address or a stack address; for other values
    /// we either print the constant or "scalar"/"matrix".
    void write(Value *V) {
      V = getUnderlyingObjectThroughLoads(V);
      if (V->getType()->isPointerTy()) {
        if (isa<AllocaInst>(V)) {
          Stream << "stack addr";
          LineLength += StringRef("stack addr").size();
        } else {
          Stream << "addr";
          LineLength += StringRef("addr").size();
        }
        if (!V->getName().empty()) {
          Stream << " %" << V->getName() << "";
          LineLength += V->getName().size() + 2;
        }
        return;
      }

      std::string Tmp;
      raw_string_ostream TmpStream(Tmp);

      if (auto *CI = dyn_cast<ConstantInt>(V))
        TmpStream << CI->getValue();
      else if (isa<Constant>(V))
        TmpStream << "constant";
      else {
        if (isMatrix(V))
          TmpStream << "matrix";
        else
          TmpStream << "scalar";
      }
      Tmp = std::string(StringRef(Tmp).trim());
      LineLength += Tmp.size();
      Stream << Tmp;
    }
    /// Linearize expression \p Expr starting at an indentation of \p Indent.
    /// Expressions that are re-used multiple times are prefixed with (reused)
    /// at the re-used root instruction.
    void linearizeExpr(Value *Expr, unsigned Indent, bool ParentReused,
                       bool ParentShared) {
      auto *I = cast<Instruction>(Expr);
      maybeIndent(Indent);
      SmallVector<Value *, 8> Ops;

      // Is Expr shared with other expression leaves?
      bool ExprShared = false;

      // Deal with shared subtrees. Mark them as shared, if required.
      if (!ParentShared) {
        auto SI = Shared.find(Expr);
        assert(SI != Shared.end() && SI->second.count(Leaf));

        for (Value *S : SI->second) {
          if (S == Leaf)
            continue;
          DebugLoc DL = cast<Instruction>(S)->getDebugLoc();
          write("shared with remark at line " + std::to_string(DL.getLine()) +
                " column " + std::to_string(DL.getCol()) + " (");
        }
        ExprShared = SI->second.size() > 1;
      }

      bool Reused = !ReusedExprs.insert(Expr).second;
      if (Reused && !ParentReused)
        write("(reused) ");

      if (auto *CI = dyn_cast<CallInst>(I)) {
        writeFnName(CI);
        Ops.append(CI->arg_begin(), CI->arg_end() - getNumShapeArgs(CI));
      } else {
        Ops.append(I->value_op_begin(), I->value_op_end());
        write(I->getOpcodeName());
      }

      write(std::string("("));

      unsigned NumOpsToBreak = 1;
      if (match(Expr, m_Intrinsic<Intrinsic::matrix_column_major_load>()))
        NumOpsToBreak = 2;

      for (Value *Op : Ops) {
        if (Ops.size() > NumOpsToBreak)
          lineBreak();

        maybeIndent(Indent + 1);
        if (isMatrix(Op))
          linearizeExpr(Op, Indent + 1, Reused, ExprShared);
        else
          write(Op);
        if (Op != Ops.back())
          write(", ");
      }

      write(")");
    }

    const std::string &getResult() { return Str; }
  };
  /// Generate remarks for matrix operations in a function.
  struct RemarkGenerator {
    const MapVector<Value *, MatrixTy> &Inst2Matrix;
    OptimizationRemarkEmitter &ORE;
    Function &Func;
    const DataLayout &DL;

    RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2Matrix,
                    OptimizationRemarkEmitter &ORE, Function &Func)
        : Inst2Matrix(Inst2Matrix), ORE(ORE), Func(Func),
          DL(Func.getDataLayout()) {}
    /// Return all leaves of the expressions in \p ExprsInSubprogram. Those
    /// are instructions in Inst2Matrix returning void or without any users in
    /// \p ExprsInSubprogram. Currently that should only include stores.
    SmallVector<Value *, 4>
    getExpressionLeaves(const SmallSetVector<Value *, 32> &ExprsInSubprogram) {
      SmallVector<Value *, 4> Leaves;
      for (auto *Expr : ExprsInSubprogram)
        if (Expr->getType()->isVoidTy() ||
            !any_of(Expr->users(), [&ExprsInSubprogram](User *U) {
              return ExprsInSubprogram.count(U);
            }))
          Leaves.push_back(Expr);
      return Leaves;
    }
    /// Recursively traverse expression \p V starting at \p Leaf and add
    /// \p Leaf to the set of expressions \p V is part of.
    void collectSharedInfo(Value *Leaf, Value *V,
                           const SmallSetVector<Value *, 32> &ExprsInSubprogram,
                           DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) {
      if (!ExprsInSubprogram.count(V))
        return;

      Shared[V].insert(Leaf);

      for (Value *Op : cast<Instruction>(V)->operand_values())
        collectSharedInfo(Leaf, Op, ExprsInSubprogram, Shared);
    }
    /// Calculate the number of exclusive and shared op counts for the
    /// expression starting at \p Root. Expressions used multiple times are
    /// counted once.
    std::pair<OpInfoTy, OpInfoTy>
    sumOpInfos(Value *Root, SmallPtrSetImpl<Value *> &ReusedExprs,
               const SmallSetVector<Value *, 32> &ExprsInSubprogram,
               DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) const {
      if (!ExprsInSubprogram.count(Root))
        return {};

      // Already counted this expression. Stop.
      if (!ReusedExprs.insert(Root).second)
        return {};

      OpInfoTy SharedCount;
      OpInfoTy Count;

      auto I = Shared.find(Root);
      auto CM = Inst2Matrix.find(Root);
      if (I->second.size() == 1)
        Count = CM->second.getOpInfo();
      else
        SharedCount = CM->second.getOpInfo();

      for (Value *Op : cast<Instruction>(Root)->operand_values()) {
        auto C = sumOpInfos(Op, ReusedExprs, ExprsInSubprogram, Shared);
        Count += C.first;
        SharedCount += C.second;
      }
      return {Count, SharedCount};
    }
    void emitRemarks() {
      if (!ORE.allowExtraAnalysis(DEBUG_TYPE))
        return;

      // Map matrix values to the subprogram, if any, they are part of.
      MapVector<DISubprogram *, SmallVector<Value *, 8>> Subprog2Exprs;
      for (const auto &KV : Inst2Matrix) {
        if (Func.getSubprogram()) {
          auto *I = cast<Instruction>(KV.first);
          DILocation *Context = I->getDebugLoc();
          while (Context) {
            Subprog2Exprs[getSubprogram(Context->getScope())].push_back(
                KV.first);
            Context = DebugLoc(Context).getInlinedAt();
          }
        } else {
          Subprog2Exprs[nullptr].push_back(KV.first);
        }
      }
      for (auto &KV : Subprog2Exprs) {
        SmallSetVector<Value *, 32> ExprsInSubprogram(KV.second.begin(),
                                                      KV.second.end());
        auto Leaves = getExpressionLeaves(ExprsInSubprogram);

        DenseMap<Value *, SmallPtrSet<Value *, 2>> Shared;
        for (Value *Leaf : Leaves)
          collectSharedInfo(Leaf, Leaf, ExprsInSubprogram, Shared);

        // Generate remarks for each leaf.
        for (auto *L : Leaves) {
          DebugLoc Loc = cast<Instruction>(L)->getDebugLoc();

          SmallPtrSet<Value *, 8> ReusedExprs;
          OpInfoTy Counts, SharedCounts;
          std::tie(Counts, SharedCounts) =
              sumOpInfos(L, ReusedExprs, ExprsInSubprogram, Shared);
          OptimizationRemark Rem(DEBUG_TYPE, "matrix-lowered", Loc,
                                 cast<Instruction>(L)->getParent());

          Rem << "Lowered with ";
          Rem << ore::NV("NumStores", Counts.NumStores) << " stores, "
              << ore::NV("NumLoads", Counts.NumLoads) << " loads, "
              << ore::NV("NumComputeOps", Counts.NumComputeOps)
              << " compute ops, "
              << ore::NV("NumExposedTransposes", Counts.NumExposedTransposes)
              << " exposed transposes";

          if (SharedCounts.NumStores > 0 || SharedCounts.NumLoads > 0 ||
              SharedCounts.NumComputeOps > 0) {
            Rem << ",\nadditionally "
                << ore::NV("NumStores", SharedCounts.NumStores) << " stores, "
                << ore::NV("NumLoads", SharedCounts.NumLoads) << " loads, "
                << ore::NV("NumFPOps", SharedCounts.NumComputeOps)
                << " compute ops"
                << " are shared with other expressions";
          }

          Rem << ("\n" + linearize(L, Shared, ExprsInSubprogram, DL));
          ORE.emit(Rem);
        }
      }
    }
    std::string
    linearize(Value *L,
              const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
              const SmallSetVector<Value *, 32> &ExprsInSubprogram,
              const DataLayout &DL) {
      ExprLinearizer Lin(DL, Inst2Matrix, Shared, ExprsInSubprogram, L);
      Lin.linearizeExpr(L, 0, false, false);
      return Lin.getResult();
    }
  };
};
} // namespace
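// A remark assembled above renders roughly as (illustrative values):
//   remark: test.cpp:12:10: Lowered with 6 stores, 6 loads, 24 compute ops,
//   0 exposed transposes
// followed by the linearized expression tree produced by ExprLinearizer,
// e.g. store(multiply.2x6.6x2.double(load(addr %A), load(addr %B)), addr %C).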
PreservedAnalyses LowerMatrixIntrinsicsPass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  LowerMatrixIntrinsics LMT(F, TTI, Minimal ? nullptr : &AM);
  if (LMT.Visit()) {
    PreservedAnalyses PA;
    if (!Minimal) {
      PA.preserve<LoopAnalysis>();
      PA.preserve<DominatorTreeAnalysis>();
    }
    return PA;
  }
  return PreservedAnalyses::all();
}

void LowerMatrixIntrinsicsPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LowerMatrixIntrinsicsPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Minimal)
    OS << "minimal";
  OS << '>';
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
static Type * getIndexType(Value *In)
hexagon Hexagon specific predictive commoning for HVX vectors
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static DISubprogram * getSubprogram(DIScope *Scope)
Helper function to either return Scope, if it is a subprogram or the attached subprogram for a local ...
static cl::opt< bool > ForceFusion("force-fuse-matrix", cl::init(false), cl::Hidden, cl::desc("Force matrix instruction fusion even if not profitable."))
static auto m_AnyAdd(const LTy &L, const RTy &R)
Match any add operation (fp or integer).
static cl::opt< bool > VerifyShapeInfo("verify-matrix-shapes", cl::Hidden, cl::desc("Enable/disable matrix shape verification."), cl::init(false))
static bool isShapePreserving(Value *V)
static cl::opt< unsigned > TileLoopsThreshold("fuse-matrix-loops-threshold", cl::init(200), cl::Hidden, cl::desc("Generate loop nests for tiling when expected " "number of operations exceeds threshold."))
static auto m_AnyMul(const LTy &L, const RTy &R)
Match any mul operation (fp or integer).
static cl::opt< unsigned > SplitMatmulRemainderOverThreshold("matrix-split-matmul-remainder-over-threshold", cl::Hidden, cl::desc("Illegal remainder vectors over this size in bits should be split " "in the inner loop of matmul"), cl::init(0))
static bool isSplat(Value *V)
Return true if V is a splat of a value (which is used when multiplying a matrix with a scalar).
static cl::opt< bool > FuseMatrix("fuse-matrix", cl::init(true), cl::Hidden, cl::desc("Enable/disable fusing matrix instructions."))
static cl::opt< bool > AllowContractEnabled("matrix-allow-contract", cl::init(false), cl::Hidden, cl::desc("Allow the use of FMAs if available and profitable. This may " "result in different results, due to less rounding error."))
static std::optional< ShapeInfo > computeShapeInfoForInst(Instruction *I, const DenseMap< Value *, ShapeInfo > &ShapeMap)
Return the ShapeInfo for the result of I, it it can be determined.
static cl::opt< bool > PrintAfterTransposeOpt("matrix-print-after-transpose-opt", cl::init(false))
static iterator_range< Use * > getShapedOperandsForInst(Instruction *I)
Return an iterator over the operands of I that should share shape information with I.
static Value * computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride, unsigned NumElements, Type *EltType, IRBuilder<> &Builder)
static cl::opt< unsigned > TileSize("fuse-matrix-tile-size", cl::init(4), cl::Hidden, cl::desc("Tile size for matrix instruction fusion using square-shaped tiles."))
static cl::opt< MatrixLayoutTy > MatrixLayout("matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor), cl::desc("Sets the default matrix layout"), cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major", "Use column-major layout"), clEnumValN(MatrixLayoutTy::RowMajor, "row-major", "Use row-major layout")))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static Value * extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex, unsigned EndIndex, const Twine &Name)
static Value * insertVector(IRBuilderTy &IRB, Value *Old, Value *V, unsigned BeginIndex, const Twine &Name)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static const int BlockSize
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
reverse_iterator rbegin()
InstListType::reverse_iterator reverse_iterator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
LLVM_ABI DISubprogram * getSubprogram() const
Get the subprogram for this scope.
Base class for scope-like contexts.
Subprogram description. Uses SubclassData1.
iterator find(const_arg_type_t< KeyT > Val)
Analysis pass which computes a DominatorTree.
static constexpr ElementCount getFixed(ScalarTy MinVal)
void setAllowContract(bool B=true)
bool allowReassoc() const
Flag queries.
bool allowContract() const
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags noUnsignedWrap()
LLVM_ABI CallInst * CreateFAddReduce(Value *Acc, Value *Src)
Create a sequential vector fadd reduction intrinsic of the source vector.
Value * CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Value * CreateFAdd(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI CallInst * CreateAddReduce(Value *Src)
Create a vector int add reduction intrinsic of the source vector.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended from a 64-bit value.
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateFMul(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void moveAfter(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Align getAlign() const
Return the alignment of the access that is being performed.
TypeSize getValue() const
Analysis pass that exposes the LoopInfo for a function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
CallInst * CreateMatrixTranspose(Value *Matrix, unsigned Rows, unsigned Columns, const Twine &Name="")
Create a llvm.matrix.transpose call, transposing Matrix with Rows rows and Columns columns.
CallInst * CreateMatrixMultiply(Value *LHS, Value *RHS, unsigned LHSRows, unsigned LHSColumns, unsigned RHSColumns, const Twine &Name="")
Create a llvm.matrix.multiply call, multiplying matrixes LHS and RHS.
static LLVM_ABI MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
const Value * Ptr
The address of the start of the location.
static LLVM_ABI MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx, const TargetLibraryInfo *TLI)
Return a location representing a particular argument of a call.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
size_type size() const
Determine the number of elements in the SetVector.
void insert_range(Range &&R)
size_type count(const_arg_type key) const
Count the number of elements of a given key in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
bool erase(PtrType Ptr)
Remove pointer from the set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
bool isVolatile() const
Return true if this is a store to a volatile memory location.
StringRef - Represent a constant reference to a string, i.e.
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
constexpr size_t size() const
size - Get the string size.
Analysis pass providing the TargetTransformInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isVoidTy() const
Return true if this is 'void'.
UnaryOps getOpcode() const
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
iterator_range< use_iterator > uses()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
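A minimal sketch of the fold-and-forward step these entries enable: inspect the users, then rewrite them all at once. The helper name is illustrative.

#include "llvm/IR/Instruction.h"
#include "llvm/Support/Debug.h"

void forwardTo(llvm::Instruction *Old, llvm::Value *New) {
  for (llvm::User *U : Old->users())
    llvm::dbgs() << "rewriting user of " << Old->getName() << ": " << *U
                 << "\n";
  Old->replaceAllUsesWith(New); // Old is now dead; the caller can erase it.
}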
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
LLVM_ABI StringRef getBaseName(ID id)
Return the LLVM name for an intrinsic, without encoded types for overloading, such as "llvm....
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
TwoOps_match< ValueOpTy, PointerOpTy, Instruction::Store > m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp)
Matches StoreInst.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
OneOps_match< OpTy, Instruction::Load > m_Load(const OpTy &Op)
Matches LoadInst.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
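A minimal sketch of the matcher combinators above in action: recognize fadd(acc, fmul(a, b)) where the multiply has a single use, the shape an FMA-style fusion looks for. Note that m_FAdd as written only matches the multiply in the second operand position; the helper name is illustrative.

#include "llvm/IR/PatternMatch.h"

bool matchSingleUseMulAdd(llvm::Value *V, llvm::Value *&A, llvm::Value *&B,
                          llvm::Value *&Acc) {
  using namespace llvm::PatternMatch;
  return match(V, m_FAdd(m_Value(Acc),
                         m_OneUse(m_FMul(m_Value(A), m_Value(B)))));
}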
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
ElementType
The element type of an SRV or UAV resource.
DiagnosticInfoOptimizationBase::Argument NV
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
void fill(R &&Range, T &&Value)
Provide wrappers to std::fill which take ranges instead of having to pass begin/end explicitly.
cl::opt< bool > ProfcheckDisableMetadataFixes
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
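A small sketch of the lockstep iteration helpers: zip_equal asserts equal lengths in debug builds, while enumerate additionally hands back the index. The helper name is illustrative.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

int dotProduct(const llvm::SmallVectorImpl<int> &A,
               const llvm::SmallVectorImpl<int> &B) {
  int Sum = 0;
  for (auto [X, Y] : llvm::zip_equal(A, B))
    Sum += X * Y;
  return Sum;
}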
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
bool operator!=(uint64_t V1, const APInt &V2)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt & operator+=(DynamicAPInt &A, int64_t B)
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
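A minimal sketch of why make_early_inc_range exists: it advances the iterator before yielding the element, so the current instruction can be erased safely mid-walk. The helper name is illustrative.

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Transforms/Utils/Local.h"

void dropTriviallyDead(llvm::BasicBlock &BB) {
  for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
    if (llvm::isInstructionTriviallyDead(&I))
      I.eraseFromParent();
}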
LLVM_ABI Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
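A minimal sketch (assuming the per-column vectors are already in order): flattening a column-major matrix back into one value is exactly a concatenation. The helper name is illustrative.

#include "llvm/Analysis/VectorUtils.h"

llvm::Value *flattenColumns(llvm::IRBuilderBase &B,
                            llvm::ArrayRef<llvm::Value *> Cols) {
  // Produces a single wide vector containing every column back to back.
  return llvm::concatenateVectors(B, Cols);
}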
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
LLVM_ABI void addStringMetadataToLoop(Loop *TheLoop, const char *MDString, unsigned V=0)
Set input string into loop metadata by keeping other values intact.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
@ Mul
Product of integers.
LLVM_ABI BasicBlock * SplitBlock(BasicBlock *Old, BasicBlock::iterator SplitPt, DominatorTree *DT, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the specified block at the specified instruction.
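A minimal sketch: splitting at an instruction so that, for example, a loop nest can be emitted between the two halves, with the dominator tree and loop info kept current. The helper name is illustrative.

#include "llvm/Transforms/Utils/BasicBlockUtils.h"

llvm::BasicBlock *splitAtInst(llvm::Instruction *I, llvm::DominatorTree *DT,
                              llvm::LoopInfo *LI) {
  return llvm::SplitBlock(I->getParent(), I->getIterator(), DT, LI,
                          /*MSSAU=*/nullptr, "split");
}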
DWARFExpression::Operation Op
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt copy(R &&Range, OutputIt Out)
LLVM_ABI Error write(MCStreamer &Out, ArrayRef< std::string > Inputs, OnCuIndexOverflow OverflowOptValue, Dwarf64StrOffsetsPromotion StrOffsetsOptValue)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
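A minimal sketch: comparing underlying objects is a cheap pre-filter before paying for a full alias query, a common step in fusion legality checks. The helper name is illustrative.

#include "llvm/Analysis/ValueTracking.h"

bool sameUnderlyingObject(const llvm::Value *A, const llvm::Value *B) {
  return llvm::getUnderlyingObject(A) == llvm::getUnderlyingObject(B);
}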
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
LLVM_ABI llvm::SmallVector< int, 16 > createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
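A minimal sketch of the mask in use: extract column C of an R-row column-major matrix embedded in the flat vector, using a sequential mask starting at C*R. The helper name is illustrative.

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/IRBuilder.h"

llvm::Value *extractColumn(llvm::IRBuilderBase &B, llvm::Value *Flat,
                           unsigned R, unsigned C) {
  // Mask {C*R, C*R+1, ..., C*R+R-1}; the final 0 requests no undef lanes.
  llvm::SmallVector<int, 16> Mask = llvm::createSequentialMask(C * R, R, 0);
  return B.CreateShuffleVector(Flat, Mask, "col");
}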
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A CRTP mix-in to automatically provide informational APIs needed for passes.