//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  // Look through an int-to-pointer cast to the underlying numeric constant.
  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}
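
// Illustrative sketch (not part of the original file): computing an address a
// fixed number of bytes past a base pointer. Assumes `MIB` is a positioned
// builder and `Base` is a p0 vreg.
//
//   LLT P0 = LLT::pointer(0, 64);
//   LLT S64 = LLT::scalar(64);
//   auto Offset = MIB.buildConstant(S64, 16);
//   auto Addr = MIB.buildPtrAdd(P0, Base, Offset); // %addr = G_PTR_ADD %base, 16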

MachineInstrBuilder MachineIRBuilder::buildObjectPtrOffset(const DstOp &Res,
                                                           const SrcOp &Op0,
                                                           const SrcOp &Op1) {
  // Offsets within a single object are known not to wrap.
  return buildPtrAdd(Res, Op0, Op1, MachineInstr::MIFlag::NoUWrap);
}
std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value,
                                    std::optional<unsigned> Flags) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
}
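
// Illustrative sketch (not part of the original file): materializePtrAdd
// either forwards the base register (offset 0, returns std::nullopt) or
// defines a fresh destination. Assumes `MIB` is a positioned builder and
// `Base` is a p0 vreg.
//
//   Register Res;
//   if (auto PtrAdd = MIB.materializePtrAdd(Res, Base, LLT::scalar(64), 16))
//     ; // Res is defined by the new G_PTR_ADD.
//   else
//     ; // Offset was zero: Res == Base, nothing was inserted.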

std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
    Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
  return materializePtrAdd(Res, Op0, ValueTy, Value,
                           MachineInstr::MIFlag::NoUWrap);
}

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
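
// Illustrative sketch (not part of the original file): widening a <2 x s32>
// value to <4 x s32> by padding with undef lanes. Assumes `MIB` is a
// positioned builder and `V2` has type <2 x s32>.
//
//   LLT V4S32 = LLT::fixed_vector(4, 32);
//   auto Padded = MIB.buildPadVectorWithUndefElements(V4S32, V2);
//   // Expands to: %a, %b = G_UNMERGE_VALUES %v2
//   //             %u = G_IMPLICIT_DEF
//   //             %padded = G_BUILD_VECTOR %a, %b, %u, %u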

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  // For vectors, materialize the scalar constant once and splat it.
  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
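
// Illustrative sketch (not part of the original file): requesting a vector
// constant goes through the splat path above. Assumes `MIB` is a positioned
// builder.
//
//   LLT V4S32 = LLT::fixed_vector(4, 32);
//   auto C = MIB.buildConstant(V4S32, 7);
//   // Expands to: %c = G_CONSTANT i32 7
//   //             %v = G_BUILD_VECTOR %c, %c, %c, %c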

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
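
// Illustrative sketch (not part of the original file): loading a field at a
// fixed offset from a base pointer while deriving the memory operand from the
// base access. Assumes `MIB` is a positioned builder, `Base` is a p0 vreg,
// and `BaseMMO` describes the original access.
//
//   LLT S32 = LLT::scalar(32);
//   auto Field = MIB.buildLoadFromOffset(S32, Base, BaseMMO, /*Offset=*/8);
//   // Emits G_CONSTANT + G_PTR_ADD + G_LOAD with an MMO offset by 8 bytes.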

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
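
// Illustrative sketch (not part of the original file): extending an s1
// compare result to s32 in whatever form the target defines for booleans.
// Assumes `MIB` is a positioned builder and `Cond` is an s1 vreg.
//
//   LLT S32 = LLT::scalar(32);
//   auto Ext = MIB.buildBoolExt(S32, Cond, /*IsFP=*/false);
//   // G_SEXT for zero-or-negative-one targets, G_ZEXT for zero-or-one,
//   // G_ANYEXT otherwise.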

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  // Pick the extend, a truncate, or a plain copy based on the relative sizes.
  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
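
// Illustrative sketch (not part of the original file): buildCast picks the
// opcode from the operand types. Assumes `MIB` is a positioned builder,
// `Ptr` is a p0 vreg and `Int` is an s64 vreg.
//
//   LLT P0 = LLT::pointer(0, 64);
//   LLT S64 = LLT::scalar(64);
//   auto I = MIB.buildCast(S64, Ptr); // G_PTRTOINT
//   auto P = MIB.buildCast(P0, Int);  // G_INTTOPTR
//   auto C = MIB.buildCast(S64, Int); // same type: plain COPY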

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
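
// Illustrative summary (not part of the original file): the merge-like opcode
// is chosen purely from the destination and source types.
//
//   scalar <- scalars: G_MERGE_VALUES   (e.g. s64 <- s32, s32)
//   vector <- scalars: G_BUILD_VECTOR   (e.g. <2 x s32> <- s32, s32)
//   vector <- vectors: G_CONCAT_VECTORS (e.g. <4 x s32> <- <2 x s32> x 2)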

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(*getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  // If the sources are already element-sized, emit a plain G_BUILD_VECTOR.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
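
// Illustrative expansion (not part of the original file): the splat idiom
// built above, for a scalar %s of type s32 and a <4 x s32> result.
//
//   %undef = G_IMPLICIT_DEF
//   %zero  = G_CONSTANT i64 0
//   %ins   = G_INSERT_VECTOR_ELT %undef, %s, %zero
//   %splat = G_SHUFFLE_VECTOR %ins, %undef, shufflemask(0, 0, 0, 0)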

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.getScalarType();
  const LLT ElemTy1 = Src1Ty.getScalarType();
  const LLT ElemTy2 = Src2Ty.getScalarType();
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR are not supported");
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res,
                                     ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}
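
// Illustrative summary (not part of the original file): the four generic
// intrinsic opcodes form a 2x2 matrix over (side effects, convergence).
//
//   (false, false) -> G_INTRINSIC
//   (true,  false) -> G_INTRINSIC_W_SIDE_EFFECTS
//   (false, true)  -> G_INTRINSIC_CONVERGENT
//   (true,  true)  -> G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS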

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (Register ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op,
                                                 std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertSubvector(const DstOp &Res, const SrcOp &Src0,
                                       const SrcOp &Src1, unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractSubvector(const DstOp &Res, const SrcOp &Src,
                                        unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
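
// Illustrative sketch (not part of the original file): emitting an atomic
// fetch-add through the generic helper. Assumes `MIB` is a positioned
// builder, `Addr`/`Incr` are vregs and `MMO` is an atomic memory operand.
//
//   auto Old = MIB.buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD,
//                                 LLT::scalar(32), Addr, Incr, MMO);
//   // The typed wrappers below forward here with the opcode filled in.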

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Function Alias Analysis Results
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:354
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:306
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:143
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:138
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:361
The address of a basic block.
Definition Constants.h:899
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
const APFloat & getValueAPF() const
Definition Constants.h:320
This is the shared class of boolean and integer constants.
Definition Constants.h:87
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:157
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
A signed pointer, in the ptrauth sense.
Definition Constants.h:1032
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1062
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1065
This is an important base class in LLVM.
Definition Constant.h:43
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
PointerType * getType() const
Global values are always pointers.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
constexpr bool isPointerOrPointerVector() const
constexpr LLT getScalarType() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
Metadata node.
Definition Metadata.h:1078
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWFMaximum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAXIMUM Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value, std::optional< unsigned > Flags=std::nullopt)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object, i.e. Res = G_PTR_ADD Op0, (G_CONSTANT Value).
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMWFMinimum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMINIMUM Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
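A hedged sketch of pairing buildLoad with a MachineMemOperand; MIRBuilder and the p0 address register Ptr are assumed to exist:
// Describe a 4-byte, 4-aligned load; MachinePointerInfo() means "unknown location".
MachineFunction &MF = MIRBuilder.getMF();
MachineMemOperand *MMO = MF.getMachineMemOperand(
    MachinePointerInfo(), MachineMemOperand::MOLoad, LLT::scalar(32), Align(4));
auto Load = MIRBuilder.buildLoad(LLT::scalar(32), Ptr, *MMO);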
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x, undef, undef, ...
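A minimal sketch of the entry above, assuming Src holds a <2 x s32> value that should be widened to <4 x s32>:
// Expands to a G_UNMERGE_VALUES of Src followed by a G_BUILD_VECTOR
// whose trailing two lanes are undef.
auto Padded =
    MIRBuilder.buildPadVectorWithUndefElements(LLT::fixed_vector(4, 32), Src);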
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
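A hedged usage sketch; Addr (p0), Cmp and New (s32), and an MMO carrying an atomic ordering are assumed to be built earlier:
auto CAS = MIRBuilder.buildAtomicCmpXchgWithSuccess(
    LLT::scalar(32), LLT::scalar(1), Addr, Cmp, New, *MMO);
// CAS.getReg(0) is the loaded old value, CAS.getReg(1) the s1 success flag.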
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
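For example, with NumBits = 12 the emitted mask is (1 << 12) - 1 = 0xFFF, so the result keeps only the low 12 bits of the pointer. A minimal sketch with a hypothetical p0 register Ptr:
// Emits G_PTRMASK Ptr, G_CONSTANT 0xFFF.
auto Low = MIRBuilder.buildMaskLowPtrBits(LLT::pointer(0, 64), Ptr, 12);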
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
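Besides the ConstantInt form listed here, the header also provides int64_t and APInt convenience overloads; a minimal sketch:
// Emits %c:_(s32) = G_CONSTANT i32 42.
auto FortyTwo = MIRBuilder.buildConstant(LLT::scalar(32), 42);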
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
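A one-line sketch of creating a typed generic vreg through the MachineRegisterInfo obtained from getMRI():
// Fresh generic virtual register of low-level type s64.
Register Tmp = getMRI()->createGenericVirtualRegister(LLT::scalar(64));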
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:217
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:224
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
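A worked example of the mask shape (sketch; assumes MathExtras.h is included):
// Three right-most bits cleared, all others set: 0b11111000.
static_assert(llvm::maskTrailingZeros<uint8_t>(3) == 0xF8, "mask shape");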
DWARFExpression::Operation Op
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:657
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition Metadata.h:761
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR.
All attributes (register class or bank and low-level type) a virtual register can have.