//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildDirectDbgValue(Register Reg,
                                                          const MDNode *Variable,
                                                          const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
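// For example, buildConstDbgValue(*ConstantInt::get(Int32Ty, 42), Var, Expr)
// emits roughly: DBG_VALUE 42, 0, !Var, !Expr. The constant is encoded as an
// immediate operand; integers wider than 64 bits use a CImm operand, and
// unrepresentable constants degrade to $noreg.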
MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}
void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}
std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
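// E.g. materializing a 16-byte offset from %base(p0) with an s64 offset type
// emits:
//   %cst:_(s64) = G_CONSTANT i64 16
//   %res:_(p0) = G_PTR_ADD %base, %cst(s64)
// whereas a zero offset simply aliases Res to Op0 and emits nothing.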
MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
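// E.g. NumBits == 4 on a 64-bit pointer builds the constant
// 0xFFFFFFFFFFFFFFF0 and emits G_PTRMASK with it, clearing the low four bits
// of the pointer (the usual align-down idiom).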
MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
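// E.g. widening <2 x s32> to <4 x s32> produces:
//   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %op0(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %res:_(<4 x s32>) = G_BUILD_VECTOR %a, %b, %u, %u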
MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}
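// E.g. narrowing <4 x s32> to <2 x s32> unmerges all four scalars and
// rebuilds the result from the leading two:
//   %a, %b, %c, %d = G_UNMERGE_VALUES %op0(<4 x s32>)
//   %res:_(<2 x s32>) = G_BUILD_VECTOR %a:_(s32), %b:_(s32)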
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
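// For a fixed-vector destination the G_CONSTANT is built once with the scalar
// element type and then splatted, e.g. a <4 x s32> constant 7 becomes:
//   %c:_(s32) = G_CONSTANT i32 7
//   %res:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c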
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}
MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}
MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
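// E.g. an s32 load at byte offset 8 from %base(p0) expands to:
//   %off:_(s64) = G_CONSTANT i64 8
//   %ptr:_(p0) = G_PTR_ADD %base, %off(s64)
//   %dst:_(s32) = G_LOAD %ptr(p0)
// with the memory operand rebased by +8; a zero offset reuses the base
// pointer directly.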
MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
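// The chosen opcode follows the target's boolean convention:
//   ZeroOrNegativeOneBooleanContent -> G_SEXT   (true is all-ones)
//   ZeroOrOneBooleanContent         -> G_ZEXT   (true is 1)
//   anything else                   -> G_ANYEXT (upper bits unspecified)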
MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}
MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
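// E.g. buildExtOrTrunc(G_ZEXT, s64, <s32 op>) emits G_ZEXT, the same call
// with an s16 destination emits G_TRUNC, and equal sizes collapse to a plain
// COPY.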
MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
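// Opcode selection for same-size casts: pointer -> scalar uses G_PTRTOINT,
// scalar -> pointer uses G_INTTOPTR, and everything else (e.g. s64 to
// <2 x s32>) falls back to G_BITCAST.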
MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}
MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}
unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
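// Merge opcode by operand shape:
//   scalar destination, scalar sources : G_MERGE_VALUES
//   vector destination, scalar sources : G_BUILD_VECTOR
//   vector destination, vector sources : G_CONCAT_VECTORS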
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}
MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
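// This emits the canonical insert+shuffle splat idiom, e.g. for <4 x s32>:
//   %v:_(<4 x s32>) = G_IMPLICIT_DEF
//   %z:_(s64) = G_CONSTANT i64 0
//   %i:_(<4 x s32>) = G_INSERT_VECTOR_ELT %v, %src(s32), %z(s64)
//   %res:_(<4 x s32>) = G_SHUFFLE_VECTOR %i, %v, shufflemask(0, 0, 0, 0)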
MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}
MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {

  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}
MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}
MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}
MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}
MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
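// The buildAtomicRMW<Op> helpers below are thin wrappers that pin the opcode
// (G_ATOMICRMW_XCHG, _ADD, _SUB, ...) and forward to buildAtomicRMW above.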
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}
MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}
void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
Function Alias Analysis Results
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
The address of a basic block.
Definition: Constants.h:889
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
bool isFPPredicate() const
Definition: InstrTypes.h:1122
bool isIntPredicate() const
Definition: InstrTypes.h:1123
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:148
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:356
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Metadata node.
Definition: Metadata.h:1067
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
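To see how the atomic builders combine with a suitably-ordered MachineMemOperand, here is a hedged sketch; all registers and the seq_cst ordering are illustrative assumptions, not a recipe from this file:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
using namespace llvm;
static void emitAtomics(MachineFunction &MF, MachineIRBuilder &B,
                        Register Addr, Register Val, Register Cmp,
                        Register New) {
  LLT S32 = LLT::scalar(32);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, S32, Align(4),
      AAMDNodes(), nullptr, SyncScope::System,
      AtomicOrdering::SequentiallyConsistent);
  // Old = *Addr; *Addr = Old + Val, performed as one atomic operation.
  Register Old = B.getMRI()->createGenericVirtualRegister(S32);
  B.buildAtomicRMWAdd(Old, Addr, Val, *MMO);
  // The s1 success result is true iff *Addr equalled Cmp and New was stored.
  B.buildAtomicCmpXchgWithSuccess(S32, LLT::scalar(1), Addr, Cmp, New, *MMO);
}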
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
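A short sketch of pointer manipulation with these helpers: bump a pointer with G_PTR_ADD, then clear its low bits with G_PTRMASK while keeping the pointer type intact (B, Ptr, and the 64-bit address space are assumptions):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
static Register bumpAndAlign16(MachineIRBuilder &B, Register Ptr) {
  LLT P0 = LLT::pointer(0, 64);
  LLT S64 = LLT::scalar(64);
  auto Bumped = B.buildPtrAdd(P0, Ptr, B.buildConstant(S64, 16)); // Ptr + 16
  auto Mask = B.buildConstant(S64, ~int64_t(15)); // -16: clears the low 4 bits
  return B.buildPtrMask(P0, Bumped, Mask).getReg(0);
}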
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FCMP Pred, Op0, Op1.
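A sketch combining this with buildBrCond above: compute an s1 ordered less-than and branch on it (B, L, R, and TrueBB are assumed from the caller):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
static void emitGuard(MachineIRBuilder &B, Register L, Register R,
                      MachineBasicBlock &TrueBB) {
  // Cond is an s1: true when L < R and neither operand is a NaN.
  auto Cond = B.buildFCmp(CmpInst::FCMP_OLT, LLT::scalar(1), L, R);
  B.buildBrCond(Cond, TrueBB);
}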
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
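These chainable operand adders are what the typed build* helpers bottom out in. As a sketch, a rough hand-spelled equivalent of buildAdd (the registers are assumed to exist and to share a type):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
static void rawAdd(MachineIRBuilder &B, Register Dst, Register L, Register R) {
  B.buildInstr(TargetOpcode::G_ADD)
      .addDef(Dst) // result register
      .addUse(L)   // first source
      .addUse(R);  // second source
}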
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher; atomic instructions on this memory location must not be reordered.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:676
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
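A small usage sketch: with an inline capacity of 8, the vector below never touches the heap for typical part counts (MRI and N are assumed names from context):
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
static void collectParts(MachineRegisterInfo &MRI, unsigned N) {
  SmallVector<Register, 8> Parts;
  Parts.reserve(N); // effectively free while N <= 8
  for (unsigned I = 0; I != N; ++I)
    Parts.push_back(MRI.createGenericVirtualRegister(LLT::scalar(32)));
}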
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:74
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:203
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:210
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
DWARFExpression::Operation Op
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:631
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:760
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
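A tiny sketch of the Align contract: construction asserts a non-zero power of two, and value() is the deliberate escape hatch back to a plain integer.
#include "llvm/Support/Alignment.h"
llvm::Align A(16);          // would assert on, e.g., 12
uint64_t Bytes = A.value(); // 16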
MachineFunction * MF
MachineFunction under construction.
MDNode * MMRA
MMRA Metadata to be set on any instruction we create.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.