MachineIRBuilder.cpp
1//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the MachineIRBuilder class.
10//===----------------------------------------------------------------------===//
11#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
12#include "llvm/CodeGen/MachineFunction.h"
13#include "llvm/CodeGen/MachineInstr.h"
14#include "llvm/CodeGen/MachineInstrBuilder.h"
15#include "llvm/CodeGen/MachineRegisterInfo.h"
16#include "llvm/CodeGen/TargetInstrInfo.h"
17#include "llvm/CodeGen/TargetLowering.h"
18#include "llvm/CodeGen/TargetOpcodes.h"
19#include "llvm/CodeGen/TargetSubtargetInfo.h"
20#include "llvm/IR/DebugInfoMetadata.h"
21
22using namespace llvm;
23
24void MachineIRBuilder::setMF(MachineFunction &MF) {
25 State.MF = &MF;
26 State.MBB = nullptr;
27 State.MRI = &MF.getRegInfo();
28 State.TII = MF.getSubtarget().getInstrInfo();
29 State.DL = DebugLoc();
30 State.PCSections = nullptr;
31 State.MMRA = nullptr;
32 State.II = MachineBasicBlock::iterator();
33 State.Observer = nullptr;
34}
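// Illustrative usage sketch (editorial addition, not part of the upstream
// file): typical client setup before calling the build* methods below. The
// helper and variable names here are hypothetical.
#if 0
static void buildExampleConstant(MachineFunction &MF, MachineBasicBlock &MBB) {
  MachineIRBuilder B;
  B.setMF(MF);                          // bind MF/MRI/TII exactly as setMF() above does
  B.setMBB(MBB);                        // insertion point: end of MBB
  B.buildConstant(LLT::scalar(32), 42); // %0:_(s32) = G_CONSTANT i32 42
}
#endif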
35
36//------------------------------------------------------------------------------
37// Build instruction variants.
38//------------------------------------------------------------------------------
39
40MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
41 MachineInstrBuilder MIB =
42 BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
43 getTII().get(Opcode));
44 return MIB;
45}
46
47MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
48 getMBB().insert(getInsertPt(), MIB);
49 recordInsertion(MIB);
50 return MIB;
51}
52
53MachineInstrBuilder
54MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
55 const MDNode *Expr) {
56 assert(isa<DILocalVariable>(Variable) && "not a variable");
57 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
58 assert(
59 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
60 "Expected inlined-at fields to agree");
61 return insertInstr(BuildMI(getMF(), getDL(),
62 getTII().get(TargetOpcode::DBG_VALUE),
63 /*IsIndirect*/ false, Reg, Variable, Expr));
64}
65
66MachineInstrBuilder
67MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
68 const MDNode *Expr) {
69 assert(isa<DILocalVariable>(Variable) && "not a variable");
70 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
71 assert(
72 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
73 "Expected inlined-at fields to agree");
74 return insertInstr(BuildMI(getMF(), getDL(),
75 getTII().get(TargetOpcode::DBG_VALUE),
76 /*IsIndirect*/ true, Reg, Variable, Expr));
77}
78
79MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
80 const MDNode *Variable,
81 const MDNode *Expr) {
82 assert(isa<DILocalVariable>(Variable) && "not a variable");
83 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
84 assert(
85 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
86 "Expected inlined-at fields to agree");
87 return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
88 .addFrameIndex(FI)
89 .addImm(0)
90 .addMetadata(Variable)
91 .addMetadata(Expr));
92}
93
94MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
95 const MDNode *Variable,
96 const MDNode *Expr) {
97 assert(isa<DILocalVariable>(Variable) && "not a variable");
98 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
99 assert(
100 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
101 "Expected inlined-at fields to agree");
102 auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
103
104 auto *NumericConstant = [&] () -> const Constant* {
105 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
106 if (CE->getOpcode() == Instruction::IntToPtr)
107 return CE->getOperand(0);
108 return &C;
109 }();
110
111 if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
112 if (CI->getBitWidth() > 64)
113 MIB.addCImm(CI);
114 else if (CI->getBitWidth() == 1)
115 MIB.addImm(CI->getZExtValue());
116 else
117 MIB.addImm(CI->getSExtValue());
118 } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
119 MIB.addFPImm(CFP);
120 } else if (isa<ConstantPointerNull>(NumericConstant)) {
121 MIB.addImm(0);
122 } else {
123 // Insert $noreg if we didn't find a usable constant and had to drop it.
124 MIB.addReg(Register());
125 }
126
127 MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
128 return insertInstr(MIB);
129}
130
131MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
132 assert(isa<DILabel>(Label) && "not a label");
133 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
134 "Expected inlined-at fields to agree");
135 auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
136
137 return MIB.addMetadata(Label);
138}
139
140MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
141 const SrcOp &Size,
142 Align Alignment) {
143 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
144 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
145 Res.addDefToMIB(*getMRI(), MIB);
146 Size.addSrcToMIB(MIB);
147 MIB.addImm(Alignment.value());
148 return MIB;
149}
150
151MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
152 int Idx) {
153 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
154 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
155 Res.addDefToMIB(*getMRI(), MIB);
156 MIB.addFrameIndex(Idx);
157 return MIB;
158}
159
160MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
161 const GlobalValue *GV) {
162 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
163 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
164 GV->getType()->getAddressSpace() &&
165 "address space mismatch");
166
167 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
168 Res.addDefToMIB(*getMRI(), MIB);
169 MIB.addGlobalAddress(GV);
170 return MIB;
171}
172
173MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
174 unsigned Idx) {
175 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
176 auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
177 Res.addDefToMIB(*getMRI(), MIB);
178 MIB.addConstantPoolIndex(Idx);
179 return MIB;
180}
181
182MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
183 unsigned JTI) {
184 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
185 .addJumpTableIndex(JTI);
186}
187
188void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
189 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
190 assert((Res == Op0) && "type mismatch");
191}
192
193void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
194 const LLT Op1) {
195 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
196 assert((Res == Op0 && Res == Op1) && "type mismatch");
197}
198
199void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
200 const LLT Op1) {
201 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
202 assert((Res == Op0) && "type mismatch");
203}
204
205MachineInstrBuilder
206MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
207 const SrcOp &Op1, std::optional<unsigned> Flags) {
208 assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
209 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
210 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
211
212 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
213}
214
222
223std::optional<MachineInstrBuilder>
224MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
225 const LLT ValueTy, uint64_t Value,
226 std::optional<unsigned> Flags) {
227 assert(Res == 0 && "Res is a result argument");
228 assert(ValueTy.isScalar() && "invalid offset type");
229
230 if (Value == 0) {
231 Res = Op0;
232 return std::nullopt;
233 }
234
235 Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
236 auto Cst = buildConstant(ValueTy, Value);
237 return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
238}
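// Illustrative sketch (editorial addition): materializePtrAdd only emits
// G_CONSTANT + G_PTR_ADD when the offset is non-zero; for a zero offset it
// forwards Op0 and returns std::nullopt. Names below are hypothetical.
#if 0
static Register addByteOffset(MachineIRBuilder &B, Register Base,
                              uint64_t Offset) {
  LLT PtrTy = B.getMRI()->getType(Base);
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  Register Res;
  B.materializePtrAdd(Res, Base, OffsetTy, Offset);
  return Res; // == Base when Offset is 0
}
#endif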
239
240std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
241 Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
242 return materializePtrAdd(Res, Op0, ValueTy, Value,
245}
246
247MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
248 const SrcOp &Op0,
249 uint32_t NumBits) {
250 LLT PtrTy = Res.getLLTTy(*getMRI());
251 LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
252 Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
253 buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
254 return buildPtrMask(Res, Op0, MaskReg);
255}
256
257MachineInstrBuilder
258MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
259 const SrcOp &Op0) {
260 LLT ResTy = Res.getLLTTy(*getMRI());
261 LLT Op0Ty = Op0.getLLTTy(*getMRI());
262
263 assert(ResTy.isVector() && "Res non vector type");
264
265 SmallVector<Register, 8> Regs;
266 if (Op0Ty.isVector()) {
267 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
268 "Different vector element types");
269 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
270 "Op0 has more elements");
271 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
272
273 for (auto Op : Unmerge.getInstr()->defs())
274 Regs.push_back(Op.getReg());
275 } else {
276 assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
277 "Op0 has more size");
278 Regs.push_back(Op0.getReg());
279 }
280 Register Undef =
281 buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
282 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
283 for (unsigned i = 0; i < NumberOfPadElts; ++i)
284 Regs.push_back(Undef);
285 return buildMergeLikeInstr(Res, Regs);
286}
287
288MachineInstrBuilder
289MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
290 const SrcOp &Op0) {
291 LLT ResTy = Res.getLLTTy(*getMRI());
292 LLT Op0Ty = Op0.getLLTTy(*getMRI());
293
294 assert(Op0Ty.isVector() && "Non vector type");
295 assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
296 (ResTy.isVector() &&
297 (ResTy.getElementType() == Op0Ty.getElementType()))) &&
298 "Different vector element types");
299 assert(
300 (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
301 "Op0 has fewer elements");
302
303 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
304 if (ResTy.isScalar())
305 return buildCopy(Res, Unmerge.getReg(0));
306 SmallVector<Register, 8> Regs;
307 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
308 Regs.push_back(Unmerge.getReg(i));
309 return buildMergeLikeInstr(Res, Regs);
310}
311
312MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
313 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
314}
315
316MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
317 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
318 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
319}
320
321MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
322 unsigned JTI,
323 Register IndexReg) {
324 assert(getMRI()->getType(TablePtr).isPointer() &&
325 "Table reg must be a pointer");
326 return buildInstr(TargetOpcode::G_BRJT)
327 .addUse(TablePtr)
328 .addJumpTableIndex(JTI)
329 .addUse(IndexReg);
330}
331
332MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
333 const SrcOp &Op) {
334 return buildInstr(TargetOpcode::COPY, Res, Op);
335}
336
337MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
338 const ConstantInt &Val) {
339 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
340 LLT Ty = Res.getLLTTy(*getMRI());
341 LLT EltTy = Ty.getScalarType();
342 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
343 "creating constant with the wrong size");
344
345 assert(!Ty.isScalableVector() &&
346 "unexpected scalable vector in buildConstant");
347
348 if (Ty.isFixedVector()) {
349 auto Const = buildInstr(TargetOpcode::G_CONSTANT)
350 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
351 .addCImm(&Val);
352 return buildSplatBuildVector(Res, Const);
353 }
354
355 auto Const = buildInstr(TargetOpcode::G_CONSTANT);
356 Const->setDebugLoc(DebugLoc());
357 Res.addDefToMIB(*getMRI(), Const);
358 Const.addCImm(&Val);
359 return Const;
360}
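// Illustrative sketch (editorial addition): with a fixed-vector destination,
// buildConstant emits one scalar G_CONSTANT and splats it via
// buildSplatBuildVector (G_BUILD_VECTOR). The types chosen here are arbitrary.
#if 0
static void buildSplat42(MachineIRBuilder &B) {
  // Expands to: %c:_(s32) = G_CONSTANT i32 42
  //             %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
  B.buildConstant(LLT::fixed_vector(4, LLT::scalar(32)), 42);
}
#endif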
361
362MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
363 int64_t Val) {
364 auto IntN = IntegerType::get(getMF().getFunction().getContext(),
365 Res.getLLTTy(*getMRI()).getScalarSizeInBits());
366 // TODO: Avoid implicit trunc?
367 // See https://github.com/llvm/llvm-project/issues/112510.
368 ConstantInt *CI = ConstantInt::getSigned(IntN, Val, /*implicitTrunc=*/true);
369 return buildConstant(Res, *CI);
370}
371
372MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
373 const ConstantFP &Val) {
374 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
375 LLT Ty = Res.getLLTTy(*getMRI());
376 LLT EltTy = Ty.getScalarType();
377
378 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
379 == EltTy.getSizeInBits() &&
380 "creating fconstant with the wrong size");
381
382 assert(!Ty.isPointer() && "invalid operand type");
383
384 assert(!Ty.isScalableVector() &&
385 "unexpected scalable vector in buildFConstant");
386
387 if (Ty.isFixedVector()) {
388 auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
389 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
390 .addFPImm(&Val);
391
392 return buildSplatBuildVector(Res, Const);
393 }
394
395 auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
396 Const->setDebugLoc(DebugLoc());
397 Res.addDefToMIB(*getMRI(), Const);
398 Const.addFPImm(&Val);
399 return Const;
400}
401
402MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
403 const APInt &Val) {
404 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
405 return buildConstant(Res, *CI);
406}
407
408MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
409 double Val) {
410 LLT DstTy = Res.getLLTTy(*getMRI());
411 auto &Ctx = getMF().getFunction().getContext();
412 auto *CFP =
413 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
414 return buildFConstant(Res, *CFP);
415}
416
417MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
418 const APFloat &Val) {
419 auto &Ctx = getMF().getFunction().getContext();
420 auto *CFP = ConstantFP::get(Ctx, Val);
421 return buildFConstant(Res, *CFP);
422}
423
424MachineInstrBuilder
425MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
426 const ConstantPtrAuth *CPA,
427 Register Addr, Register AddrDisc) {
428 auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
429 Res.addDefToMIB(*getMRI(), MIB);
430 MIB.addUse(Addr);
431 MIB.addImm(CPA->getKey()->getZExtValue());
432 MIB.addUse(AddrDisc);
433 MIB.addImm(CPA->getDiscriminator()->getZExtValue());
434 return MIB;
435}
436
437MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
438 MachineBasicBlock &Dest) {
439 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
440
441 auto MIB = buildInstr(TargetOpcode::G_BRCOND);
442 Tst.addSrcToMIB(MIB);
443 MIB.addMBB(&Dest);
444 return MIB;
445}
446
447MachineInstrBuilder
448MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
449 MachinePointerInfo PtrInfo, Align Alignment,
450 MachineMemOperand::Flags MMOFlags,
451 const AAMDNodes &AAInfo) {
452 MMOFlags |= MachineMemOperand::MOLoad;
453 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
454
455 LLT Ty = Dst.getLLTTy(*getMRI());
456 MachineMemOperand *MMO =
457 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
458 return buildLoad(Dst, Addr, *MMO);
459}
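// Illustrative sketch (editorial addition): the MachinePointerInfo overload
// above creates the MachineMemOperand itself. A typical call for a 4-byte,
// 4-aligned stack-slot load might look like this (names are hypothetical).
#if 0
static void loadFromFrameIndex(MachineIRBuilder &B, Register Dst, Register Addr,
                               int FI) {
  MachineFunction &MF = B.getMF();
  B.buildLoad(Dst, Addr, MachinePointerInfo::getFixedStack(MF, FI), Align(4));
}
#endif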
460
461MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
462 const DstOp &Res,
463 const SrcOp &Addr,
464 MachineMemOperand &MMO) {
465 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
466 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
467
468 auto MIB = buildInstr(Opcode);
469 Res.addDefToMIB(*getMRI(), MIB);
470 Addr.addSrcToMIB(MIB);
471 MIB.addMemOperand(&MMO);
472 return MIB;
473}
474
475MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
476 const DstOp &Dst, const SrcOp &BasePtr,
477 MachineMemOperand &BaseMMO, int64_t Offset) {
478 LLT LoadTy = Dst.getLLTTy(*getMRI());
479 MachineMemOperand *OffsetMMO =
480 getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);
481
482 if (Offset == 0) // This may be a size or type changing load.
483 return buildLoad(Dst, BasePtr, *OffsetMMO);
484
485 LLT PtrTy = BasePtr.getLLTTy(*getMRI());
486 LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
487 auto ConstOffset = buildConstant(OffsetTy, Offset);
488 auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
489 return buildLoad(Dst, Ptr, *OffsetMMO);
490}
491
492MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
493 const SrcOp &Addr,
494 MachineMemOperand &MMO) {
495 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
496 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
497
498 auto MIB = buildInstr(TargetOpcode::G_STORE);
499 Val.addSrcToMIB(MIB);
500 Addr.addSrcToMIB(MIB);
501 MIB.addMemOperand(&MMO);
502 return MIB;
503}
504
506 const SrcOp &Val,
507 const SrcOp &Addr,
508 MachineMemOperand &MMO) {
509 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
510 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
511
512 auto MIB = buildInstr(Opcode);
513 Val.addSrcToMIB(MIB);
514 Addr.addSrcToMIB(MIB);
515 MIB.addMemOperand(&MMO);
516 return MIB;
517}
518
519MachineInstrBuilder
520MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
521 MachinePointerInfo PtrInfo, Align Alignment,
522 MachineMemOperand::Flags MMOFlags,
523 const AAMDNodes &AAInfo) {
524 MMOFlags |= MachineMemOperand::MOStore;
525 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
526
527 LLT Ty = Val.getLLTTy(*getMRI());
528 MachineMemOperand *MMO =
529 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
530 return buildStore(Val, Addr, *MMO);
531}
532
533MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
534 const SrcOp &Op) {
535 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
536}
537
538MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
539 const SrcOp &Op) {
540 return buildInstr(TargetOpcode::G_SEXT, Res, Op);
541}
542
543MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
544 const SrcOp &Op,
545 std::optional<unsigned> Flags) {
546 return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
547}
548
549unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
550 const auto *TLI = getMF().getSubtarget().getTargetLowering();
551 switch (TLI->getBooleanContents(IsVec, IsFP)) {
552 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
553 return TargetOpcode::G_SEXT;
554 case TargetLoweringBase::ZeroOrOneBooleanContent:
555 return TargetOpcode::G_ZEXT;
556 default:
557 return TargetOpcode::G_ANYEXT;
558 }
559}
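// Illustrative sketch (editorial addition): how the extension opcode chosen
// above maps onto the target's boolean contents. The mapping in the comments
// restates the switch; the helper name is hypothetical.
#if 0
static unsigned extendBool(MachineIRBuilder &B, bool IsVec, bool IsFP) {
  // ZeroOrNegativeOneBooleanContent -> G_SEXT
  // ZeroOrOneBooleanContent         -> G_ZEXT
  // anything else                   -> G_ANYEXT
  return B.getBoolExtOp(IsVec, IsFP);
}
#endif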
560
561MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
562 const SrcOp &Op,
563 bool IsFP) {
564 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
565 return buildInstr(ExtOp, Res, Op);
566}
567
568MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
569 const SrcOp &Op,
570 bool IsVector,
571 bool IsFP) {
572 const auto *TLI = getMF().getSubtarget().getTargetLowering();
573 switch (TLI->getBooleanContents(IsVector, IsFP)) {
574 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
575 return buildSExtInReg(Res, Op, 1);
576 case TargetLoweringBase::ZeroOrOneBooleanContent:
577 return buildZExtInReg(Res, Op, 1);
578 case TargetLoweringBase::UndefinedBooleanContent:
579 return buildCopy(Res, Op);
580 }
581
582 llvm_unreachable("unexpected BooleanContent");
583}
584
585MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
586 const DstOp &Res,
587 const SrcOp &Op) {
588 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
589 TargetOpcode::G_SEXT == ExtOpc) &&
590 "Expecting Extending Opc");
591 assert(Res.getLLTTy(*getMRI()).isScalar() ||
592 Res.getLLTTy(*getMRI()).isVector());
593 assert(Res.getLLTTy(*getMRI()).isScalar() ==
594 Op.getLLTTy(*getMRI()).isScalar());
595
596 unsigned Opcode = TargetOpcode::COPY;
597 if (Res.getLLTTy(*getMRI()).getSizeInBits() >
598 Op.getLLTTy(*getMRI()).getSizeInBits())
599 Opcode = ExtOpc;
600 else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
601 Op.getLLTTy(*getMRI()).getSizeInBits())
602 Opcode = TargetOpcode::G_TRUNC;
603 else
604 assert(Res.getLLTTy(*getMRI()).getSizeInBits() ==
605 Op.getLLTTy(*getMRI()).getSizeInBits());
606
607 return buildInstr(Opcode, Res, Op);
608}
609
610MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
611 const SrcOp &Op) {
612 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
613}
614
615MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
616 const SrcOp &Op) {
617 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
618}
619
620MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
621 const SrcOp &Op) {
622 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
623}
624
625MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
626 const SrcOp &Op,
627 int64_t ImmOp) {
628 LLT ResTy = Res.getLLTTy(*getMRI());
629 auto Mask = buildConstant(
630 ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
631 return buildAnd(Res, Op, Mask);
632}
633
634MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
635 const SrcOp &Src) {
636 LLT SrcTy = Src.getLLTTy(*getMRI());
637 LLT DstTy = Dst.getLLTTy(*getMRI());
638 if (SrcTy == DstTy)
639 return buildCopy(Dst, Src);
640
641 unsigned Opcode;
642 if (SrcTy.isPointerOrPointerVector())
643 Opcode = TargetOpcode::G_PTRTOINT;
644 else if (DstTy.isPointerOrPointerVector())
645 Opcode = TargetOpcode::G_INTTOPTR;
646 else {
647 assert(!SrcTy.isPointerOrPointerVector() &&
648 !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
649 Opcode = TargetOpcode::G_BITCAST;
650 }
651
652 return buildInstr(Opcode, Dst, Src);
653}
654
656 const SrcOp &Src,
657 uint64_t Index) {
658 LLT SrcTy = Src.getLLTTy(*getMRI());
659 LLT DstTy = Dst.getLLTTy(*getMRI());
660
661#ifndef NDEBUG
662 assert(SrcTy.isValid() && "invalid operand type");
663 assert(DstTy.isValid() && "invalid operand type");
664 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
665 "extracting off end of register");
666#endif
667
668 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
669 assert(Index == 0 && "insertion past the end of a register");
670 return buildCast(Dst, Src);
671 }
672
673 auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
674 Dst.addDefToMIB(*getMRI(), Extract);
675 Src.addSrcToMIB(Extract);
676 Extract.addImm(Index);
677 return Extract;
678}
679
680MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
681 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
682}
683
684MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
685 ArrayRef<Register> Ops) {
686 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
687 // we need some temporary storage for the DstOp objects. Here we use a
688 // sufficiently large SmallVector to not go through the heap.
689 SmallVector<SrcOp, 8> TmpVec(Ops);
690 assert(TmpVec.size() > 1);
691 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
692}
693
694MachineInstrBuilder
695MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
696 ArrayRef<Register> Ops) {
697 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
698 // we need some temporary storage for the DstOp objects. Here we use a
699 // sufficiently large SmallVector to not go through the heap.
700 SmallVector<SrcOp, 8> TmpVec(Ops);
701 assert(TmpVec.size() > 1);
702 return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
703}
704
705MachineInstrBuilder
706MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
707 std::initializer_list<SrcOp> Ops) {
708 assert(Ops.size() > 1);
709 return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
710}
711
712unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
713 ArrayRef<SrcOp> SrcOps) const {
714 if (DstOp.getLLTTy(*getMRI()).isVector()) {
715 if (SrcOps[0].getLLTTy(*getMRI()).isVector())
716 return TargetOpcode::G_CONCAT_VECTORS;
717 return TargetOpcode::G_BUILD_VECTOR;
718 }
719
720 return TargetOpcode::G_MERGE_VALUES;
721}
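// Illustrative sketch (editorial addition): how buildMergeLikeInstr resolves
// to the three opcodes above. The register types assumed below (Lo/Hi are s32
// values) are hypothetical.
#if 0
static void mergeExamples(MachineIRBuilder &B, Register Lo, Register Hi) {
  // Scalar destination -> G_MERGE_VALUES.
  B.buildMergeLikeInstr(LLT::scalar(64), {Lo, Hi});
  // Vector destination with scalar sources -> G_BUILD_VECTOR.
  B.buildMergeLikeInstr(LLT::fixed_vector(2, 32), {Lo, Hi});
}
#endif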
722
723MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
724 const SrcOp &Op) {
725 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
726 // we need some temporary storage for the DstOp objects. Here we use a
727 // sufficiently large SmallVector to not go through the heap.
728 SmallVector<DstOp, 8> TmpVec(Res);
729 assert(TmpVec.size() > 1);
730 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
731}
732
733MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
734 const SrcOp &Op) {
735 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
736 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
737 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
738}
739
740MachineInstrBuilder
741MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
742 const SrcOp &Op) {
743 LLT OpTy = Op.getLLTTy(*getMRI());
744 unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
745 SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
746 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
747}
748
749MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
750 const SrcOp &Op) {
751 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
752 // we need some temporary storage for the DstOp objects. Here we use a
753 // sufficiently large SmallVector to not go through the heap.
754 SmallVector<DstOp, 8> TmpVec(Res);
755 assert(TmpVec.size() > 1);
756 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
757}
758
759MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
760 ArrayRef<Register> Ops) {
761 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
762 // we need some temporary storage for the DstOp objects. Here we use a
763 // sufficiently large SmallVector to not go through the heap.
764 SmallVector<SrcOp, 8> TmpVec(Ops);
765 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
766}
767
768MachineInstrBuilder
769MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
770 ArrayRef<APInt> Ops) {
771 SmallVector<SrcOp> TmpVec;
772 TmpVec.reserve(Ops.size());
773 LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
774 for (const auto &Op : Ops)
775 TmpVec.push_back(buildConstant(EltTy, Op));
776 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
777}
778
779MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
780 const SrcOp &Src) {
781 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
782 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
783}
784
785MachineInstrBuilder
786MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
787 ArrayRef<Register> Ops) {
788 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
789 // we need some temporary storage for the DstOp objects. Here we use a
790 // sufficiently large SmallVector to not go through the heap.
791 SmallVector<SrcOp, 8> TmpVec(Ops);
792 if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
793 Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
794 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
795 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
796}
797
798MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
799 const SrcOp &Src) {
800 LLT DstTy = Res.getLLTTy(*getMRI());
801 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
802 "Expected Src to match Dst elt ty");
803 auto UndefVec = buildUndef(DstTy);
804 auto Zero = buildConstant(LLT::integer(64), 0);
805 auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
806 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
807 return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
808}
809
810MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
811 const SrcOp &Src) {
812 assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
813 "Expected Src to match Dst elt ty");
814 return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
815}
816
817MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
818 const SrcOp &Src1,
819 const SrcOp &Src2,
820 ArrayRef<int> Mask) {
821 LLT DstTy = Res.getLLTTy(*getMRI());
822 LLT Src1Ty = Src1.getLLTTy(*getMRI());
823 LLT Src2Ty = Src2.getLLTTy(*getMRI());
824 const LLT DstElemTy = DstTy.getScalarType();
825 const LLT ElemTy1 = Src1Ty.getScalarType();
826 const LLT ElemTy2 = Src2Ty.getScalarType();
827 assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
828 assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR are not supported");
829 (void)DstElemTy;
830 (void)ElemTy1;
831 (void)ElemTy2;
832 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
833 return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
834 .addShuffleMask(MaskAlloc);
835}
836
837MachineInstrBuilder
838MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
839 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
840 // we need some temporary storage for the DstOp objects. Here we use a
841 // sufficiently large SmallVector to not go through the heap.
842 SmallVector<SrcOp, 8> TmpVec(Ops);
843 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
844}
845
846MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
847 const SrcOp &Src,
848 const SrcOp &Op,
849 unsigned Index) {
850 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
851 Res.getLLTTy(*getMRI()).getSizeInBits() &&
852 "insertion past the end of a register");
853
854 if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
855 Op.getLLTTy(*getMRI()).getSizeInBits()) {
856 return buildCast(Res, Op);
857 }
858
859 return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
860}
861
862MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
863 unsigned Step) {
864 unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
865 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
866 APInt(Bitwidth, Step));
867 auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
868 StepVector->setDebugLoc(DebugLoc());
869 Res.addDefToMIB(*getMRI(), StepVector);
870 StepVector.addCImm(CI);
871 return StepVector;
872}
873
874MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
875 unsigned MinElts) {
876
877 auto IntN = IntegerType::get(getMF().getFunction().getContext(),
878 Res.getLLTTy(*getMRI()).getScalarSizeInBits());
879 ConstantInt *CI = ConstantInt::get(IntN, MinElts);
880 return buildVScale(Res, *CI);
881}
882
883MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
884 const ConstantInt &MinElts) {
885 auto VScale = buildInstr(TargetOpcode::G_VSCALE);
886 VScale->setDebugLoc(DebugLoc());
887 Res.addDefToMIB(*getMRI(), VScale);
888 VScale.addCImm(&MinElts);
889 return VScale;
890}
891
892MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
893 const APInt &MinElts) {
894 ConstantInt *CI =
895 ConstantInt::get(getMF().getFunction().getContext(), MinElts);
896 return buildVScale(Res, *CI);
897}
898
899static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
900 if (HasSideEffects && IsConvergent)
901 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
902 if (HasSideEffects)
903 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
904 if (IsConvergent)
905 return TargetOpcode::G_INTRINSIC_CONVERGENT;
906 return TargetOpcode::G_INTRINSIC;
907}
908
909MachineInstrBuilder
910MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
911 ArrayRef<Register> ResultRegs,
912 bool HasSideEffects, bool isConvergent) {
913 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
914 for (Register ResultReg : ResultRegs)
915 MIB.addDef(ResultReg);
916 MIB.addIntrinsicID(ID);
917 return MIB;
918}
919
920MachineInstrBuilder
921MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
922 ArrayRef<Register> ResultRegs) {
924 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
925 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
926 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
927}
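// Illustrative sketch (editorial addition): the attribute-based overload above
// derives HasSideEffects/isConvergent from the intrinsic itself, so a simple
// memory-free intrinsic becomes a plain G_INTRINSIC. The intrinsic picked here
// is only an example; the helper name is hypothetical.
#if 0
static void buildSqrtIntrinsic(MachineIRBuilder &B, Register Dst, Register Src) {
  B.buildIntrinsic(Intrinsic::sqrt, {Dst}).addUse(Src);
}
#endif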
928
929MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
930 ArrayRef<DstOp> Results,
931 bool HasSideEffects,
932 bool isConvergent) {
933 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
934 for (DstOp Result : Results)
935 Result.addDefToMIB(*getMRI(), MIB);
936 MIB.addIntrinsicID(ID);
937 return MIB;
938}
939
943 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
944 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
945 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
946}
947
950 std::optional<unsigned> Flags) {
951 return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
952}
953
956 std::optional<unsigned> Flags) {
957 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
958}
959
960MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
961 const DstOp &Res,
962 const SrcOp &Op0,
963 const SrcOp &Op1,
964 std::optional<unsigned> Flags) {
965 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
966}
967
968MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
969 const DstOp &Res,
970 const SrcOp &Op0,
971 const SrcOp &Op1,
972 std::optional<unsigned> Flags) {
973
974 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
975}
976
977MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
978 const SrcOp &Op0,
979 const SrcOp &Op1) {
980 return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
981}
982
983MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
984 const SrcOp &Op0,
985 const SrcOp &Op1) {
986 return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
987}
988
989MachineInstrBuilder
990MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
991 const SrcOp &Op0, const SrcOp &Op1,
992 std::optional<unsigned> Flags) {
993
994 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
995}
996
997MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
998 const SrcOp &Src0,
999 const SrcOp &Src1,
1000 unsigned Idx) {
1001 return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
1002 {Src0, Src1, uint64_t(Idx)});
1003}
1004
1005MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
1006 const SrcOp &Src,
1007 unsigned Idx) {
1008 return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
1009 {Src, uint64_t(Idx)});
1010}
1011
1014 const SrcOp &Elt, const SrcOp &Idx) {
1015 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
1016}
1017
1020 const SrcOp &Idx) {
1021 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
1022}
1023
1024MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
1025 const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
1026 const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
1027#ifndef NDEBUG
1028 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1029 LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
1030 LLT AddrTy = Addr.getLLTTy(*getMRI());
1031 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1032 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1033 assert(OldValResTy.isScalar() && "invalid operand type");
1034 assert(SuccessResTy.isScalar() && "invalid operand type");
1035 assert(AddrTy.isPointer() && "invalid operand type");
1036 assert(CmpValTy.isValid() && "invalid operand type");
1037 assert(NewValTy.isValid() && "invalid operand type");
1038 assert(OldValResTy == CmpValTy && "type mismatch");
1039 assert(OldValResTy == NewValTy && "type mismatch");
1040#endif
1041
1042 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
1043 OldValRes.addDefToMIB(*getMRI(), MIB);
1044 SuccessRes.addDefToMIB(*getMRI(), MIB);
1045 Addr.addSrcToMIB(MIB);
1046 CmpVal.addSrcToMIB(MIB);
1047 NewVal.addSrcToMIB(MIB);
1048 MIB.addMemOperand(&MMO);
1049 return MIB;
1050}
1051
1054 const SrcOp &CmpVal, const SrcOp &NewVal,
1055 MachineMemOperand &MMO) {
1056#ifndef NDEBUG
1057 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1058 LLT AddrTy = Addr.getLLTTy(*getMRI());
1059 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1060 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1061 assert(OldValResTy.isScalar() && "invalid operand type");
1062 assert(AddrTy.isPointer() && "invalid operand type");
1063 assert(CmpValTy.isValid() && "invalid operand type");
1064 assert(NewValTy.isValid() && "invalid operand type");
1065 assert(OldValResTy == CmpValTy && "type mismatch");
1066 assert(OldValResTy == NewValTy && "type mismatch");
1067#endif
1068
1069 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
1070 OldValRes.addDefToMIB(*getMRI(), MIB);
1071 Addr.addSrcToMIB(MIB);
1072 CmpVal.addSrcToMIB(MIB);
1073 NewVal.addSrcToMIB(MIB);
1074 MIB.addMemOperand(&MMO);
1075 return MIB;
1076}
1077
1078MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
1079 unsigned Opcode, const DstOp &OldValRes,
1080 const SrcOp &Addr, const SrcOp &Val,
1081 MachineMemOperand &MMO) {
1082
1083#ifndef NDEBUG
1084 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1085 LLT AddrTy = Addr.getLLTTy(*getMRI());
1086 LLT ValTy = Val.getLLTTy(*getMRI());
1087 assert(AddrTy.isPointer() && "invalid operand type");
1088 assert(ValTy.isValid() && "invalid operand type");
1089 assert(OldValResTy == ValTy && "type mismatch");
1090 assert(MMO.isAtomic() && "not atomic mem operand");
1091#endif
1092
1093 auto MIB = buildInstr(Opcode);
1094 OldValRes.addDefToMIB(*getMRI(), MIB);
1095 Addr.addSrcToMIB(MIB);
1096 Val.addSrcToMIB(MIB);
1097 MIB.addMemOperand(&MMO);
1098 return MIB;
1099}
1100
1103 Register Val, MachineMemOperand &MMO) {
1104 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1105 MMO);
1106}
1109 Register Val, MachineMemOperand &MMO) {
1110 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1111 MMO);
1112}
1115 Register Val, MachineMemOperand &MMO) {
1116 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1117 MMO);
1118}
1121 Register Val, MachineMemOperand &MMO) {
1122 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1123 MMO);
1124}
1127 Register Val, MachineMemOperand &MMO) {
1128 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1129 MMO);
1130}
1132 Register Addr,
1133 Register Val,
1134 MachineMemOperand &MMO) {
1135 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1136 MMO);
1137}
1140 Register Val, MachineMemOperand &MMO) {
1141 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1142 MMO);
1143}
1146 Register Val, MachineMemOperand &MMO) {
1147 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1148 MMO);
1149}
1152 Register Val, MachineMemOperand &MMO) {
1153 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1154 MMO);
1155}
1158 Register Val, MachineMemOperand &MMO) {
1159 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1160 MMO);
1161}
1164 Register Val, MachineMemOperand &MMO) {
1165 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1166 MMO);
1167}
1168
1171 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1172 MachineMemOperand &MMO) {
1173 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1174 MMO);
1175}
1176
1178MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1179 MachineMemOperand &MMO) {
1180 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1181 MMO);
1182}
1183
1186 const SrcOp &Val, MachineMemOperand &MMO) {
1187 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1188 MMO);
1189}
1190
1193 const SrcOp &Val, MachineMemOperand &MMO) {
1194 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1195 MMO);
1196}
1197
1200 const SrcOp &Addr, const SrcOp &Val,
1201 MachineMemOperand &MMO) {
1202 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
1203 Val, MMO);
1204}
1205
1208 const SrcOp &Addr, const SrcOp &Val,
1209 MachineMemOperand &MMO) {
1210 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
1211 Val, MMO);
1212}
1213
1214MachineInstrBuilder
1215MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1216 return buildInstr(TargetOpcode::G_FENCE)
1217 .addImm(Ordering)
1218 .addImm(Scope);
1219}
1220
1221MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
1222 unsigned RW,
1223 unsigned Locality,
1224 unsigned CacheType,
1225 MachineMemOperand &MMO) {
1226 auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
1227 Addr.addSrcToMIB(MIB);
1228 MIB.addImm(RW).addImm(Locality).addImm(CacheType);
1229 MIB.addMemOperand(&MMO);
1230 return MIB;
1231}
1232
1233MachineInstrBuilder
1234MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
1235#ifndef NDEBUG
1236 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1237#endif
1238
1239 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
1240}
1241
1242void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1243 bool IsExtend) {
1244#ifndef NDEBUG
1245 if (DstTy.isVector()) {
1246 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1247 assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
1248 "different number of elements in a trunc/ext");
1249 } else
1250 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1251
1252 if (IsExtend)
1253 assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1254 "invalid narrowing extend");
1255 else
1256 assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1257 "invalid widening trunc");
1258#endif
1259}
1260
1261void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1262 const LLT Op0Ty, const LLT Op1Ty) {
1263#ifndef NDEBUG
1264 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1265 "invalid operand type");
1266 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1267 if (ResTy.isScalar() || ResTy.isPointer())
1268 assert(TstTy.isScalar() && "type mismatch");
1269 else
1270 assert((TstTy.isScalar() ||
1271 (TstTy.isVector() &&
1272 TstTy.getElementCount() == Op0Ty.getElementCount())) &&
1273 "type mismatch");
1274#endif
1275}
1276
1277MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
1278 ArrayRef<DstOp> DstOps,
1279 ArrayRef<SrcOp> SrcOps,
1280 std::optional<unsigned> Flags) {
1281 switch (Opc) {
1282 default:
1283 break;
1284 case TargetOpcode::G_SELECT: {
1285 assert(DstOps.size() == 1 && "Invalid select");
1286 assert(SrcOps.size() == 3 && "Invalid select");
1287 validateSelectOp(
1288 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1289 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1290 break;
1291 }
1292 case TargetOpcode::G_FNEG:
1293 case TargetOpcode::G_ABS:
1294 // All these are unary ops.
1295 assert(DstOps.size() == 1 && "Invalid Dst");
1296 assert(SrcOps.size() == 1 && "Invalid Srcs");
1297 validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1298 SrcOps[0].getLLTTy(*getMRI()));
1299 break;
1300 case TargetOpcode::G_ADD:
1301 case TargetOpcode::G_AND:
1302 case TargetOpcode::G_MUL:
1303 case TargetOpcode::G_OR:
1304 case TargetOpcode::G_SUB:
1305 case TargetOpcode::G_XOR:
1306 case TargetOpcode::G_UDIV:
1307 case TargetOpcode::G_SDIV:
1308 case TargetOpcode::G_UREM:
1309 case TargetOpcode::G_SREM:
1310 case TargetOpcode::G_SMIN:
1311 case TargetOpcode::G_SMAX:
1312 case TargetOpcode::G_UMIN:
1313 case TargetOpcode::G_UMAX:
1314 case TargetOpcode::G_UADDSAT:
1315 case TargetOpcode::G_SADDSAT:
1316 case TargetOpcode::G_USUBSAT:
1317 case TargetOpcode::G_SSUBSAT: {
1318 // All these are binary ops.
1319 assert(DstOps.size() == 1 && "Invalid Dst");
1320 assert(SrcOps.size() == 2 && "Invalid Srcs");
1321 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1322 SrcOps[0].getLLTTy(*getMRI()),
1323 SrcOps[1].getLLTTy(*getMRI()));
1324 break;
1325 }
1326 case TargetOpcode::G_SHL:
1327 case TargetOpcode::G_ASHR:
1328 case TargetOpcode::G_LSHR:
1329 case TargetOpcode::G_USHLSAT:
1330 case TargetOpcode::G_SSHLSAT: {
1331 assert(DstOps.size() == 1 && "Invalid Dst");
1332 assert(SrcOps.size() == 2 && "Invalid Srcs");
1333 validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1334 SrcOps[0].getLLTTy(*getMRI()),
1335 SrcOps[1].getLLTTy(*getMRI()));
1336 break;
1337 }
1338 case TargetOpcode::G_SEXT:
1339 case TargetOpcode::G_ZEXT:
1340 case TargetOpcode::G_ANYEXT:
1341 assert(DstOps.size() == 1 && "Invalid Dst");
1342 assert(SrcOps.size() == 1 && "Invalid Srcs");
1343 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1344 SrcOps[0].getLLTTy(*getMRI()), true);
1345 break;
1346 case TargetOpcode::G_TRUNC:
1347 case TargetOpcode::G_FPTRUNC: {
1348 assert(DstOps.size() == 1 && "Invalid Dst");
1349 assert(SrcOps.size() == 1 && "Invalid Srcs");
1350 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1351 SrcOps[0].getLLTTy(*getMRI()), false);
1352 break;
1353 }
1354 case TargetOpcode::G_BITCAST: {
1355 assert(DstOps.size() == 1 && "Invalid Dst");
1356 assert(SrcOps.size() == 1 && "Invalid Srcs");
1357 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1358 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1359 break;
1360 }
1361 case TargetOpcode::COPY:
1362 assert(DstOps.size() == 1 && "Invalid Dst");
1363 // If the caller wants to add a subreg source it has to be done separately
1364 // so we may not have any SrcOps at this point yet.
1365 break;
1366 case TargetOpcode::G_FCMP:
1367 case TargetOpcode::G_ICMP: {
1368 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1369 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1370 // For F/ICMP, the first src operand is the predicate, followed by
1371 // the two comparands.
1372 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1373 "Expecting predicate");
1374 assert([&]() -> bool {
1375 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1376 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1377 : CmpInst::isFPPredicate(Pred);
1378 }() && "Invalid predicate");
1379 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1380 "Type mismatch");
1381 assert([&]() -> bool {
1382 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1383 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1384 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1385 return DstTy.isScalar();
1386 else
1387 return DstTy.isVector() &&
1388 DstTy.getElementCount() == Op0Ty.getElementCount();
1389 }() && "Type Mismatch");
1390 break;
1391 }
1392 case TargetOpcode::G_UNMERGE_VALUES: {
1393 assert(!DstOps.empty() && "Invalid trivial sequence");
1394 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1395 assert(llvm::all_of(DstOps,
1396 [&, this](const DstOp &Op) {
1397 return Op.getLLTTy(*getMRI()) ==
1398 DstOps[0].getLLTTy(*getMRI());
1399 }) &&
1400 "type mismatch in output list");
1401 assert((TypeSize::ScalarTy)DstOps.size() *
1402 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1403 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1404 "input operands do not cover output register");
1405 break;
1406 }
1407 case TargetOpcode::G_MERGE_VALUES: {
1408 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1409 assert(DstOps.size() == 1 && "Invalid Dst");
1410 assert(llvm::all_of(SrcOps,
1411 [&, this](const SrcOp &Op) {
1412 return Op.getLLTTy(*getMRI()) ==
1413 SrcOps[0].getLLTTy(*getMRI());
1414 }) &&
1415 "type mismatch in input list");
1416 assert((TypeSize::ScalarTy)SrcOps.size() *
1417 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1418 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1419 "input operands do not cover output register");
1420 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1421 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1422 break;
1423 }
1424 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1425 assert(DstOps.size() == 1 && "Invalid Dst size");
1426 assert(SrcOps.size() == 2 && "Invalid Src size");
1427 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1428 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1429 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1430 "Invalid operand type");
1431 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1432 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1433 DstOps[0].getLLTTy(*getMRI()) &&
1434 "Type mismatch");
1435 break;
1436 }
1437 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1438 assert(DstOps.size() == 1 && "Invalid dst size");
1439 assert(SrcOps.size() == 3 && "Invalid src size");
1440 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1441 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1442 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1443 SrcOps[1].getLLTTy(*getMRI()) &&
1444 "Type mismatch");
1445 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1446 assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
1447 SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
1448 "Type mismatch");
1449 break;
1450 }
1451 case TargetOpcode::G_INSERT_SUBVECTOR: {
1452 assert(DstOps.size() == 1 && "Invalid Dst");
1453 assert(SrcOps.size() == 3 && "Invalid Srcs");
1454 [[maybe_unused]] LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1455 [[maybe_unused]] LLT BigVecTy = SrcOps[0].getLLTTy(*getMRI());
1456 [[maybe_unused]] LLT SubVecTy = SrcOps[1].getLLTTy(*getMRI());
1457 assert(DstTy == BigVecTy &&
1458 "Dest and insert subvector source types must match!");
1459 assert(DstTy.isVector() && SubVecTy.isVector() &&
1460 "Insert subvector VTs must be vectors!");
1461 assert(DstTy.getElementType() == SubVecTy.getElementType() &&
1462 "Insert subvector VTs must have the same element type!");
1463 assert((DstTy.isScalable() || !SubVecTy.isScalable()) &&
1464 "Cannot insert a scalable vector into a fixed length vector!");
1465 assert((DstTy.isScalable() != SubVecTy.isScalable() ||
1466 DstTy.getElementCount().getKnownMinValue() >=
1467 SubVecTy.getElementCount().getKnownMinValue()) &&
1468 "Insert subvector must be from smaller vector to larger vector!");
1469 assert(SrcOps[2].getSrcOpKind() == SrcOp::SrcType::Ty_Imm &&
1470 "Insert subvector index must be constant");
1471 assert((DstTy.isScalable() != SubVecTy.isScalable() ||
1472 (SubVecTy.getElementCount().getKnownMinValue() +
1473 (uint64_t)SrcOps[2].getImm()) <=
1474 DstTy.getElementCount().getKnownMinValue()) &&
1475 "Insert subvector overflow!");
1476 assert((uint64_t)SrcOps[2].getImm() %
1477 SubVecTy.getElementCount().getKnownMinValue() ==
1478 0 &&
1479 "Insert index is not a multiple of the subvector length");
1480 break;
1481 }
1482 case TargetOpcode::G_EXTRACT_SUBVECTOR: {
1483 assert(DstOps.size() == 1 && "Invalid Dst");
1484 assert(SrcOps.size() == 2 && "Invalid Srcs");
1485 [[maybe_unused]] LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1486 [[maybe_unused]] LLT SrcVecTy = SrcOps[0].getLLTTy(*getMRI());
1487 assert(DstTy.isVector() && SrcVecTy.isVector() &&
1488 "Extract subvector VTs must be vectors!");
1489 assert(DstTy.getElementType() == SrcVecTy.getElementType() &&
1490 "Extract subvector VTs must have the same element type!");
1491 assert((!DstTy.isScalable() || SrcVecTy.isScalable()) &&
1492 "Cannot extract a scalable vector from a fixed length vector!");
1493 assert((DstTy.isScalable() != SrcVecTy.isScalable() ||
1494 DstTy.getElementCount().getKnownMinValue() <=
1495 SrcVecTy.getElementCount().getKnownMinValue()) &&
1496 "Extract subvector must be from larger vector to smaller vector!");
1497 assert(SrcOps[1].getSrcOpKind() == SrcOp::SrcType::Ty_Imm &&
1498 "Extract subvector index must be a constant");
1499 assert((DstTy.isScalable() != SrcVecTy.isScalable() ||
1500 (DstTy.getElementCount().getKnownMinValue() +
1501 (uint64_t)SrcOps[1].getImm()) <=
1502 SrcVecTy.getElementCount().getKnownMinValue()) &&
1503 "Extract subvector overflow!");
1504 assert((uint64_t)SrcOps[1].getImm() %
1505 DstTy.getElementCount().getKnownMinValue() ==
1506 0 &&
1507 "Extract index is not a multiple of the output vector length");
1508 break;
1509 }
1510 case TargetOpcode::G_BUILD_VECTOR: {
1511 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1512 "Must have at least 2 operands");
1513 assert(DstOps.size() == 1 && "Invalid DstOps");
1514 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1515 "Res type must be a vector");
1516 assert(llvm::all_of(SrcOps,
1517 [&, this](const SrcOp &Op) {
1518 return Op.getLLTTy(*getMRI()) ==
1519 SrcOps[0].getLLTTy(*getMRI());
1520 }) &&
1521 "type mismatch in input list");
1522 assert((TypeSize::ScalarTy)SrcOps.size() *
1523 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1524 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1525 "input scalars do not exactly cover the output vector register");
1526 break;
1527 }
1528 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1529 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1530 "Must have at least 2 operands");
1531 assert(DstOps.size() == 1 && "Invalid DstOps");
1532 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1533 "Res type must be a vector");
1534 assert(llvm::all_of(SrcOps,
1535 [&, this](const SrcOp &Op) {
1536 return Op.getLLTTy(*getMRI()) ==
1537 SrcOps[0].getLLTTy(*getMRI());
1538 }) &&
1539 "type mismatch in input list");
1540 break;
1541 }
1542 case TargetOpcode::G_CONCAT_VECTORS: {
1543 assert(DstOps.size() == 1 && "Invalid DstOps");
1544 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1545 "Must have at least 2 operands");
1546 assert(llvm::all_of(SrcOps,
1547 [&, this](const SrcOp &Op) {
1548 return (Op.getLLTTy(*getMRI()).isVector() &&
1549 Op.getLLTTy(*getMRI()) ==
1550 SrcOps[0].getLLTTy(*getMRI()));
1551 }) &&
1552 "type mismatch in input list");
1553 assert((TypeSize::ScalarTy)SrcOps.size() *
1554 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1555 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1556 "input vectors do not exactly cover the output vector register");
1557 break;
1558 }
1559 case TargetOpcode::G_UADDE: {
1560 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1561 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1562 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1563 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1564 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1565 "Invalid operand");
1566 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1567 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1568 "type mismatch");
1569 break;
1570 }
1571 }
1572
1573 auto MIB = buildInstr(Opc);
1574 for (const DstOp &Op : DstOps)
1575 Op.addDefToMIB(*getMRI(), MIB);
1576 for (const SrcOp &Op : SrcOps)
1577 Op.addSrcToMIB(MIB);
1578 if (Flags)
1579 MIB->setFlags(*Flags);
1580 return MIB;
1581}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Function Alias Analysis Results
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:278
const fltSemantics & getSemantics() const
Definition APFloat.h:1542
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
The address of a basic block.
Definition Constants.h:1065
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
A signed pointer, in the ptrauth sense.
Definition Constants.h:1198
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
This is an important base class in LLVM.
Definition Constant.h:43
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
PointerType * getType() const
Global values are always pointers.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
LLT getScalarType() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr ElementCount getElementCount() const
constexpr bool isPointerOrPointerVector() const
static LLT integer(unsigned SizeInBits)
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
Metadata node.
Definition Metadata.h:1080
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWFMaximum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAXIMUM Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value, std::optional< unsigned > Flags=std::nullopt)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
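The compare and select builders are typically used together. The following minimal sketch is not part of this file; the helper name and the s32 operand type are assumptions for illustration.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Emit Dst = (A > B) ? A : B using G_ICMP + G_SELECT (operands assumed s32).
static Register emitSMax(MachineIRBuilder &MIB, Register A, Register B) {
  LLT S32 = LLT::scalar(32);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_SGT, LLT::scalar(1), A, B);
  return MIB.buildSelect(S32, Cmp, A, B).getReg(0);
}

Passing an LLT as the DstOp asks the builder to create a fresh virtual register of that type for the result.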
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
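As a usage note (hypothetical helper, not from this file), the *OrTrunc builders let a caller coerce a value to a target width without checking the source width itself:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Coerce Src to s64, sign- or zero-extending narrower values and truncating
// wider ones; the builder picks G_SEXT/G_ZEXT, G_TRUNC, or COPY as needed.
static Register coerceToS64(MachineIRBuilder &MIB, Register Src, bool IsSigned) {
  LLT S64 = LLT::scalar(64);
  return IsSigned ? MIB.buildSExtOrTrunc(S64, Src).getReg(0)
                  : MIB.buildZExtOrTrunc(S64, Src).getReg(0);
}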
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
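A minimal sketch of driving the generic atomic-RMW builder; the helper name, s32 width, 4-byte alignment, and monotonic ordering are assumptions for illustration.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;

// OldVal = G_ATOMICRMW_ADD [Addr], Delta with an explicit atomic MMO.
static Register emitAtomicAdd(MachineIRBuilder &MIB, Register Addr,
                              Register Delta) {
  MachineFunction &MF = MIB.getMF();
  LLT S32 = LLT::scalar(32);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, S32, Align(4),
      AAMDNodes(), /*Ranges=*/nullptr, SyncScope::System,
      AtomicOrdering::Monotonic);
  return MIB.buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, S32, Addr, Delta,
                            *MMO)
      .getReg(0);
}

The typed wrappers such as buildAtomicRMWAdd listed above are conveniences over the same opcode-parameterized entry point.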
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in memory at Reg (suitably modified by Expr).
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified by Expr).
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
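A small sketch (hypothetical helper) showing how the branch builders combine to terminate a block with a conditional edge and an explicit fallthrough edge:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Cond is expected to be an s1 value; TrueBB/FalseBB are existing successors.
static void emitCondBr(MachineIRBuilder &MIB, Register Cond,
                       MachineBasicBlock &TrueBB, MachineBasicBlock &FalseBB) {
  MIB.buildBrCond(Cond, TrueBB); // G_BRCOND Cond, %TrueBB
  MIB.buildBr(FalseBB);          // G_BR %FalseBB
}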
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VECTORS Op0, ...
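The unmerge and merge-like builders are the usual way to split and re-assemble wide values during legalization. A minimal sketch (hypothetical helper; the s64/s32 widths are assumptions):

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Split an s64 value into two s32 halves and rebuild the s64 from them.
static Register splitAndRejoin(MachineIRBuilder &MIB, Register Wide) {
  auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Wide); // Lo, Hi defs
  Register Lo = Unmerge.getReg(0);
  Register Hi = Unmerge.getReg(1);
  return MIB.buildMergeLikeInstr(LLT::scalar(64), {Lo, Hi}).getReg(0);
}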
MachineInstrBuilder buildAtomicRMWFMinimum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMINIMUM Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
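A sketch (hypothetical helper; the s64 offset type is an assumption) of addressing a fixed byte offset from a base pointer, letting materializePtrAdd skip the instruction entirely when the offset is zero:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include <cstdint>
using namespace llvm;

// Returns a pointer Register addressing Base + ByteOff. When ByteOff is zero,
// materializePtrAdd simply forwards Base and builds nothing.
static Register addrAtOffset(MachineIRBuilder &MIB, Register Base,
                             uint64_t ByteOff) {
  Register Res; // filled in by materializePtrAdd
  MIB.materializePtrAdd(Res, Base, LLT::scalar(64), ByteOff);
  return Res;
}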
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
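The load and store builders require an explicit MachineMemOperand. A sketch (hypothetical helper, assuming an s32 value, a 64-bit address-space-0 pointer, and a pre-created frame index FI):

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

// Store Val into stack slot FI and immediately reload it.
static Register storeAndReload(MachineIRBuilder &MIB, Register Val, int FI) {
  MachineFunction &MF = MIB.getMF();
  LLT S32 = LLT::scalar(32);
  auto Addr = MIB.buildFrameIndex(LLT::pointer(0, 64), FI);
  MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      S32, Align(4));
  MIB.buildStore(Val, Addr, *StoreMMO);
  MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      S32, Align(4));
  return MIB.buildLoad(S32, Addr, *LoadMMO).getReg(0);
}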
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x, undef, ..., padding Op0 with undef elements to the width of Res.
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction for the given Label.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
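A sketch of the debug-value builders; the helper is hypothetical, and Variable/Expr are assumed to come from the function's existing debug metadata so the builders' asserts hold.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/DebugInfoMetadata.h"
using namespace llvm;

// Describe the variable's current location: in a register (direct form) or in
// a stack slot (frame-index form). Only one form is emitted per program point.
static void describeVariable(MachineIRBuilder &MIB, Register Loc, int FI,
                             const DILocalVariable *Variable,
                             const DIExpression *Expr) {
  if (Loc.isValid())
    MIB.buildDirectDbgValue(Loc, Variable, Expr); // DBG_VALUE Loc, ...
  else
    MIB.buildFIDbgValue(FI, Variable, Expr);      // DBG_VALUE <fi#FI>, ...
}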
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
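A sketch (hypothetical helper; the <4 x s32> shape and s64 index type are assumptions) that writes and then reads back a vector lane with the element builders:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Insert NewElt into lane 0 of Vec and read the lane back out.
static Register touchLaneZero(MachineIRBuilder &MIB, Register Vec,
                              Register NewElt) {
  LLT V4S32 = LLT::fixed_vector(4, 32);
  auto Idx = MIB.buildConstant(LLT::scalar(64), 0);
  auto Ins = MIB.buildInsertVectorElement(V4S32, Vec, NewElt, Idx);
  return MIB.buildExtractVectorElement(LLT::scalar(32), Ins, Idx).getReg(0);
}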
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
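A sketch (hypothetical helper; the s32 width, 4-byte alignment, and seq_cst orderings are assumptions) of a compare-and-swap that also yields the success flag:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include <utility>
using namespace llvm;

// Returns {old value, s1 success flag} of a CAS on [Addr].
static std::pair<Register, Register>
emitCAS(MachineIRBuilder &MIB, Register Addr, Register Cmp, Register New) {
  MachineFunction &MF = MIB.getMF();
  LLT S32 = LLT::scalar(32);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, S32, Align(4),
      AAMDNodes(), /*Ranges=*/nullptr, SyncScope::System,
      AtomicOrdering::SequentiallyConsistent,
      AtomicOrdering::SequentiallyConsistent);
  auto Res = MIB.buildAtomicCmpXchgWithSuccess(S32, LLT::scalar(1), Addr, Cmp,
                                               New, *MMO);
  return {Res.getReg(0), Res.getReg(1)};
}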
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x, dropping the trailing elements that do not fit in Res.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Idx0.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStoreInstr(unsigned Opcode, const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert <opcode> Val, Addr, MMO.
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
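A sketch (hypothetical helper; the widths are arbitrary) of the integer and floating-point constant builders, each of which defines a fresh virtual register of the requested type:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Materialize an s32 integer constant and an s64 floating-point constant.
static void emitConstants(MachineIRBuilder &MIB) {
  Register C42 = MIB.buildConstant(LLT::scalar(32), 42).getReg(0);   // G_CONSTANT
  Register Pi = MIB.buildFConstant(LLT::scalar(64), 3.14159).getReg(0); // G_FCONSTANT
  (void)C42;
  (void)Pi;
}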
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
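A sketch (hypothetical helper; the 8-bit field and s32 container are assumptions) contrasting the two in-register extension builders:

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Treat the low 8 bits of In (an s32 value) as a signed or unsigned byte.
static Register extendLowByte(MachineIRBuilder &MIB, Register In,
                              bool IsSigned) {
  LLT S32 = LLT::scalar(32);
  return IsSigned ? MIB.buildSExtInReg(S32, In, 8).getReg(0)  // G_SEXT_INREG In, 8
                  : MIB.buildZExtInReg(S32, In, 8).getReg(0); // G_AND In, 0xff
}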
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
@ Undef
Value of the register doesn't matter.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
DWARFExpression::Operation Op
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:658
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) exposed during codegen.
All attributes (register class or bank and low-level type) a virtual register can have.