MachineIRBuilder.cpp
1//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the MachineIRBuilder class.
10//===----------------------------------------------------------------------===//
21
22using namespace llvm;
23
24void MachineIRBuilder::setMF(MachineFunction &MF) {
25 State.MF = &MF;
26 State.MBB = nullptr;
27 State.MRI = &MF.getRegInfo();
28 State.TII = MF.getSubtarget().getInstrInfo();
29 State.DL = DebugLoc();
30 State.PCSections = nullptr;
31 State.MMRA = nullptr;
32 State.II = MachineBasicBlock::iterator();
33 State.Observer = nullptr;
34}
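// Illustrative usage sketch (not part of the upstream source): a client pass
// binds the builder to a function and picks an insertion point before
// emitting generic instructions. The helper name and registers are
// hypothetical.
static void exampleBindBuilder(MachineFunction &MF, MachineBasicBlock &MBB) {
  MachineIRBuilder B;
  B.setMF(MF);   // caches MRI/TII and resets the DebugLoc, as shown above
  B.setMBB(MBB); // new instructions are appended at the end of MBB
  // %0:_(s32) = G_CONSTANT i32 7
  B.buildConstant(LLT::scalar(32), 7);
}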
35
36//------------------------------------------------------------------------------
37// Build instruction variants.
38//------------------------------------------------------------------------------
39
46
52
53MachineInstrBuilder MachineIRBuilder::buildDirectDbgValue(Register Reg,
54 const MDNode *Variable,
55 const MDNode *Expr) {
56 assert(isa<DILocalVariable>(Variable) && "not a variable");
57 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
58 assert(
59 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
60 "Expected inlined-at fields to agree");
61 return insertInstr(BuildMI(getMF(), getDL(),
62 getTII().get(TargetOpcode::DBG_VALUE),
63 /*IsIndirect*/ false, Reg, Variable, Expr));
64}
65
66MachineInstrBuilder MachineIRBuilder::buildIndirectDbgValue(Register Reg,
67 const MDNode *Variable,
68 const MDNode *Expr) {
69 assert(isa<DILocalVariable>(Variable) && "not a variable");
70 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
71 assert(
72 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
73 "Expected inlined-at fields to agree");
74 return insertInstr(BuildMI(getMF(), getDL(),
75 getTII().get(TargetOpcode::DBG_VALUE),
76 /*IsIndirect*/ true, Reg, Variable, Expr));
77}
78
79MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
80 const MDNode *Variable,
81 const MDNode *Expr) {
82 assert(isa<DILocalVariable>(Variable) && "not a variable");
83 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
84 assert(
85 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
86 "Expected inlined-at fields to agree");
87 return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
88 .addFrameIndex(FI)
89 .addImm(0)
90 .addMetadata(Variable)
91 .addMetadata(Expr));
92}
93
94MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
95 const MDNode *Variable,
96 const MDNode *Expr) {
97 assert(isa<DILocalVariable>(Variable) && "not a variable");
98 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
99 assert(
100 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
101 "Expected inlined-at fields to agree");
102 auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
103
104 auto *NumericConstant = [&] () -> const Constant* {
105 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
106 if (CE->getOpcode() == Instruction::IntToPtr)
107 return CE->getOperand(0);
108 return &C;
109 }();
110
111 if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
112 if (CI->getBitWidth() > 64)
113 MIB.addCImm(CI);
114 else if (CI->getBitWidth() == 1)
115 MIB.addImm(CI->getZExtValue());
116 else
117 MIB.addImm(CI->getSExtValue());
118 } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
119 MIB.addFPImm(CFP);
120 } else if (isa<ConstantPointerNull>(NumericConstant)) {
121 MIB.addImm(0);
122 } else {
123 // Insert $noreg if we didn't find a usable constant and had to drop it.
124 MIB.addReg(Register());
125 }
126
127 MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
128 return insertInstr(MIB);
129}
130
131MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
132 assert(isa<DILabel>(Label) && "not a label");
133 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
134 "Expected inlined-at fields to agree");
135 auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
136
137 return MIB.addMetadata(Label);
138}
139
140MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
141 const SrcOp &Size,
142 Align Alignment) {
143 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
144 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
145 Res.addDefToMIB(*getMRI(), MIB);
146 Size.addSrcToMIB(MIB);
147 MIB.addImm(Alignment.value());
148 return MIB;
149}
150
151MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
152 int Idx) {
153 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
154 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
155 Res.addDefToMIB(*getMRI(), MIB);
156 MIB.addFrameIndex(Idx);
157 return MIB;
158}
159
160MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
161 const GlobalValue *GV) {
162 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
163 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
164 GV->getType()->getAddressSpace() &&
165 "address space mismatch");
166
167 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
168 Res.addDefToMIB(*getMRI(), MIB);
169 MIB.addGlobalAddress(GV);
170 return MIB;
171}
172
174 unsigned Idx) {
175 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
176 auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
177 Res.addDefToMIB(*getMRI(), MIB);
178 MIB.addConstantPoolIndex(Idx);
179 return MIB;
180}
181
183 unsigned JTI) {
184 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
185 .addJumpTableIndex(JTI);
186}
187
188void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
189 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
190 assert((Res == Op0) && "type mismatch");
191}
192
194 const LLT Op1) {
195 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
196 assert((Res == Op0 && Res == Op1) && "type mismatch");
197}
198
199void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
200 const LLT Op1) {
201 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
202 assert((Res == Op0) && "type mismatch");
203}
204
207 const SrcOp &Op1, std::optional<unsigned> Flags) {
208 assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
209 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
210 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
211
212 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
213}
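// Illustrative sketch (hypothetical helper, not in the upstream file):
// addressing `Base + 16` with G_PTR_ADD. A 64-bit offset type is assumed;
// real callers should match the target's index width.
static Register examplePtrOffset(MachineIRBuilder &B, Register Base) {
  LLT PtrTy = B.getMRI()->getType(Base);            // e.g. p0
  auto Off = B.buildConstant(LLT::scalar(64), 16);  // %off:_(s64) = G_CONSTANT 16
  return B.buildPtrAdd(PtrTy, Base, Off).getReg(0); // %p:_(p0) = G_PTR_ADD
}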
214
222
223std::optional<MachineInstrBuilder>
224MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
225 const LLT ValueTy, uint64_t Value,
226 std::optional<unsigned> Flags) {
227 assert(Res == 0 && "Res is a result argument");
228 assert(ValueTy.isScalar() && "invalid offset type");
229
230 if (Value == 0) {
231 Res = Op0;
232 return std::nullopt;
233 }
234
235 Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
236 auto Cst = buildConstant(ValueTy, Value);
237 return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
238}
239
240std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
241 Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
242 return materializePtrAdd(Res, Op0, ValueTy, Value,
245}
246
248 const SrcOp &Op0,
249 uint32_t NumBits) {
250 LLT PtrTy = Res.getLLTTy(*getMRI());
251 LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
252 Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
253 buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
254 return buildPtrMask(Res, Op0, MaskReg);
255}
256
259 const SrcOp &Op0) {
260 LLT ResTy = Res.getLLTTy(*getMRI());
261 LLT Op0Ty = Op0.getLLTTy(*getMRI());
262
263 assert(ResTy.isVector() && "Res non vector type");
264
266 if (Op0Ty.isVector()) {
267 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
268 "Different vector element types");
269 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
270 "Op0 has more elements");
271 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
272
273 for (auto Op : Unmerge.getInstr()->defs())
274 Regs.push_back(Op.getReg());
275 } else {
276 assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
277 "Op0 has more size");
278 Regs.push_back(Op0.getReg());
279 }
281 buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
282 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
283 for (unsigned i = 0; i < NumberOfPadElts; ++i)
284 Regs.push_back(Undef);
285 return buildMergeLikeInstr(Res, Regs);
286}
287
290 const SrcOp &Op0) {
291 LLT ResTy = Res.getLLTTy(*getMRI());
292 LLT Op0Ty = Op0.getLLTTy(*getMRI());
293
294 assert(Op0Ty.isVector() && "Non vector type");
295 assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
296 (ResTy.isVector() &&
297 (ResTy.getElementType() == Op0Ty.getElementType()))) &&
298 "Different vector element types");
299 assert(
300 (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
301 "Op0 has fewer elements");
302
303 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
304 if (ResTy.isScalar())
305 return buildCopy(Res, Unmerge.getReg(0));
307 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
308 Regs.push_back(Unmerge.getReg(i));
309 return buildMergeLikeInstr(Res, Regs);
310}
311
313 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
314}
315
317 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
318 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
319}
320
322 unsigned JTI,
323 Register IndexReg) {
324 assert(getMRI()->getType(TablePtr).isPointer() &&
325 "Table reg must be a pointer");
326 return buildInstr(TargetOpcode::G_BRJT)
327 .addUse(TablePtr)
328 .addJumpTableIndex(JTI)
329 .addUse(IndexReg);
330}
331
333 const SrcOp &Op) {
334 return buildInstr(TargetOpcode::COPY, Res, Op);
335}
336
338 const ConstantInt &Val) {
339 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
340 LLT Ty = Res.getLLTTy(*getMRI());
341 LLT EltTy = Ty.getScalarType();
342 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
343 "creating constant with the wrong size");
344
345 assert(!Ty.isScalableVector() &&
346 "unexpected scalable vector in buildConstant");
347
348 if (Ty.isFixedVector()) {
349 auto Const = buildInstr(TargetOpcode::G_CONSTANT)
350 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
351 .addCImm(&Val);
352 return buildSplatBuildVector(Res, Const);
353 }
354
355 auto Const = buildInstr(TargetOpcode::G_CONSTANT);
356 Const->setDebugLoc(DebugLoc());
357 Res.addDefToMIB(*getMRI(), Const);
358 Const.addCImm(&Val);
359 return Const;
360}
361
362MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
363 int64_t Val) {
364 auto IntN = IntegerType::get(getMF().getFunction().getContext(),
365 Res.getLLTTy(*getMRI()).getScalarSizeInBits());
366 // TODO: Avoid implicit trunc?
367 // See https://github.com/llvm/llvm-project/issues/112510.
368 ConstantInt *CI = ConstantInt::getSigned(IntN, Val, /*implicitTrunc=*/true);
369 return buildConstant(Res, *CI);
370}
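// Illustrative sketch (hypothetical helper): the overloads above accept
// either an LLT destination (a fresh vreg is created) or an existing
// register; Dst32 is assumed to be a 32-bit generic vreg.
static void exampleConstants(MachineIRBuilder &B, Register Dst32) {
  auto C = B.buildConstant(LLT::scalar(64), -1); // %c:_(s64) = G_CONSTANT -1
  B.buildConstant(Dst32, 42);                    // Dst32:_(s32) = G_CONSTANT 42
  (void)C;
}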
371
373 const ConstantFP &Val) {
374 assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
375 LLT Ty = Res.getLLTTy(*getMRI());
376 LLT EltTy = Ty.getScalarType();
377
378 assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
379 == EltTy.getSizeInBits() &&
380 "creating fconstant with the wrong size");
381
382 assert(!Ty.isPointer() && "invalid operand type");
383
384 assert(!Ty.isScalableVector() &&
385 "unexpected scalable vector in buildFConstant");
386
387 if (Ty.isFixedVector()) {
388 auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
389 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
390 .addFPImm(&Val);
391
392 return buildSplatBuildVector(Res, Const);
393 }
394
395 auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
396 Const->setDebugLoc(DebugLoc());
397 Res.addDefToMIB(*getMRI(), Const);
398 Const.addFPImm(&Val);
399 return Const;
400}
401
403 const APInt &Val) {
404 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
405 return buildConstant(Res, *CI);
406}
407
409 double Val) {
410 LLT DstTy = Res.getLLTTy(*getMRI());
411 auto &Ctx = getMF().getFunction().getContext();
412 auto *CFP =
413 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
414 return buildFConstant(Res, *CFP);
415}
416
418 const APFloat &Val) {
419 auto &Ctx = getMF().getFunction().getContext();
420 auto *CFP = ConstantFP::get(Ctx, Val);
421 return buildFConstant(Res, *CFP);
422}
423
426 const ConstantPtrAuth *CPA,
427 Register Addr, Register AddrDisc) {
428 auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
429 Res.addDefToMIB(*getMRI(), MIB);
430 MIB.addUse(Addr);
431 MIB.addImm(CPA->getKey()->getZExtValue());
432 MIB.addUse(AddrDisc);
433 MIB.addImm(CPA->getDiscriminator()->getZExtValue());
434 return MIB;
435}
436
438 MachineBasicBlock &Dest) {
439 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
440
441 auto MIB = buildInstr(TargetOpcode::G_BRCOND);
442 Tst.addSrcToMIB(MIB);
443 MIB.addMBB(&Dest);
444 return MIB;
445}
446
449 MachinePointerInfo PtrInfo, Align Alignment,
451 const AAMDNodes &AAInfo) {
452 MMOFlags |= MachineMemOperand::MOLoad;
453 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
454
455 LLT Ty = Dst.getLLTTy(*getMRI());
456 MachineMemOperand *MMO =
457 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
458 return buildLoad(Dst, Addr, *MMO);
459}
460
462 const DstOp &Res,
463 const SrcOp &Addr,
464 MachineMemOperand &MMO) {
465 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
466 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
467
468 auto MIB = buildInstr(Opcode);
469 Res.addDefToMIB(*getMRI(), MIB);
470 Addr.addSrcToMIB(MIB);
471 MIB.addMemOperand(&MMO);
472 return MIB;
473}
474
476 const DstOp &Dst, const SrcOp &BasePtr,
477 MachineMemOperand &BaseMMO, int64_t Offset) {
478 LLT LoadTy = Dst.getLLTTy(*getMRI());
479 MachineMemOperand *OffsetMMO =
480 getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);
481
482 if (Offset == 0) // This may be a size or type changing load.
483 return buildLoad(Dst, BasePtr, *OffsetMMO);
484
485 LLT PtrTy = BasePtr.getLLTTy(*getMRI());
486 LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
487 auto ConstOffset = buildConstant(OffsetTy, Offset);
488 auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
489 return buildLoad(Dst, Ptr, *OffsetMMO);
490}
491
493 const SrcOp &Addr,
494 MachineMemOperand &MMO) {
495 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
496 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
497
498 auto MIB = buildInstr(TargetOpcode::G_STORE);
499 Val.addSrcToMIB(MIB);
500 Addr.addSrcToMIB(MIB);
501 MIB.addMemOperand(&MMO);
502 return MIB;
503}
504
507 MachinePointerInfo PtrInfo, Align Alignment,
509 const AAMDNodes &AAInfo) {
510 MMOFlags |= MachineMemOperand::MOStore;
511 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
512
513 LLT Ty = Val.getLLTTy(*getMRI());
514 MachineMemOperand *MMO =
515 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
516 return buildStore(Val, Addr, *MMO);
517}
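// Illustrative sketch (hypothetical helper): a store/load pair through the
// MachinePointerInfo-based overloads above, which create the
// MachineMemOperand internally. FI is assumed to be a valid 32-bit stack slot
// and Addr a pointer to it.
static void exampleSpillReload(MachineIRBuilder &B, Register Val, Register Addr,
                               int FI) {
  MachinePointerInfo PtrInfo =
      MachinePointerInfo::getFixedStack(B.getMF(), FI);
  B.buildStore(Val, Addr, PtrInfo, Align(4));            // G_STORE
  B.buildLoad(LLT::scalar(32), Addr, PtrInfo, Align(4)); // G_LOAD
}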
518
520 const SrcOp &Op) {
521 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
522}
523
525 const SrcOp &Op) {
526 return buildInstr(TargetOpcode::G_SEXT, Res, Op);
527}
528
530 const SrcOp &Op,
531 std::optional<unsigned> Flags) {
532 return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
533}
534
535unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
536 const auto *TLI = getMF().getSubtarget().getTargetLowering();
537 switch (TLI->getBooleanContents(IsVec, IsFP)) {
538 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
539 return TargetOpcode::G_SEXT;
540 case TargetLoweringBase::ZeroOrOneBooleanContent:
541 return TargetOpcode::G_ZEXT;
542 default:
543 return TargetOpcode::G_ANYEXT;
544 }
545}
546
548 const SrcOp &Op,
549 bool IsFP) {
550 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
551 return buildInstr(ExtOp, Res, Op);
552}
553
555 const SrcOp &Op,
556 bool IsVector,
557 bool IsFP) {
558 const auto *TLI = getMF().getSubtarget().getTargetLowering();
559 switch (TLI->getBooleanContents(IsVector, IsFP)) {
560 case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
561 return buildSExtInReg(Res, Op, 1);
562 case TargetLoweringBase::ZeroOrOneBooleanContent:
563 return buildZExtInReg(Res, Op, 1);
564 case TargetLoweringBase::UndefinedBooleanContent:
565 return buildCopy(Res, Op);
566 }
567
568 llvm_unreachable("unexpected BooleanContent");
569}
570
572 const DstOp &Res,
573 const SrcOp &Op) {
574 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
575 TargetOpcode::G_SEXT == ExtOpc) &&
576 "Expecting Extending Opc");
577 assert(Res.getLLTTy(*getMRI()).isScalar() ||
578 Res.getLLTTy(*getMRI()).isVector());
579 assert(Res.getLLTTy(*getMRI()).isScalar() ==
580 Op.getLLTTy(*getMRI()).isScalar());
581
582 unsigned Opcode = TargetOpcode::COPY;
583 if (Res.getLLTTy(*getMRI()).getSizeInBits() >
584 Op.getLLTTy(*getMRI()).getSizeInBits())
585 Opcode = ExtOpc;
586 else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
587 Op.getLLTTy(*getMRI()).getSizeInBits())
588 Opcode = TargetOpcode::G_TRUNC;
589 else
590 assert(Res.getLLTTy(*getMRI()).getSizeInBits() ==
591 Op.getLLTTy(*getMRI()).getSizeInBits());
592
593 return buildInstr(Opcode, Res, Op);
594}
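// Illustrative sketch (hypothetical helper): buildExtOrTrunc and its
// buildSExtOrTrunc/buildZExtOrTrunc/buildAnyExtOrTrunc wrappers pick the
// opcode from the relative sizes, so callers can resize a value without
// checking widths themselves.
static Register exampleResize(MachineIRBuilder &B, Register Src, LLT DstTy) {
  // G_SEXT if DstTy is wider than Src, G_TRUNC if narrower, COPY if equal.
  return B.buildSExtOrTrunc(DstTy, Src).getReg(0);
}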
595
597 const SrcOp &Op) {
598 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
599}
600
602 const SrcOp &Op) {
603 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
604}
605
607 const SrcOp &Op) {
608 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
609}
610
612 const SrcOp &Op,
613 int64_t ImmOp) {
614 LLT ResTy = Res.getLLTTy(*getMRI());
615 auto Mask = buildConstant(
616 ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
617 return buildAnd(Res, Op, Mask);
618}
619
621 const SrcOp &Src) {
622 LLT SrcTy = Src.getLLTTy(*getMRI());
623 LLT DstTy = Dst.getLLTTy(*getMRI());
624 if (SrcTy == DstTy)
625 return buildCopy(Dst, Src);
626
627 unsigned Opcode;
628 if (SrcTy.isPointerOrPointerVector())
629 Opcode = TargetOpcode::G_PTRTOINT;
630 else if (DstTy.isPointerOrPointerVector())
631 Opcode = TargetOpcode::G_INTTOPTR;
632 else {
633 assert(!SrcTy.isPointerOrPointerVector() &&
634 !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
635 Opcode = TargetOpcode::G_BITCAST;
636 }
637
638 return buildInstr(Opcode, Dst, Src);
639}
640
642 const SrcOp &Src,
643 uint64_t Index) {
644 LLT SrcTy = Src.getLLTTy(*getMRI());
645 LLT DstTy = Dst.getLLTTy(*getMRI());
646
647#ifndef NDEBUG
648 assert(SrcTy.isValid() && "invalid operand type");
649 assert(DstTy.isValid() && "invalid operand type");
650 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
651 "extracting off end of register");
652#endif
653
654 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
655 assert(Index == 0 && "insertion past the end of a register");
656 return buildCast(Dst, Src);
657 }
658
659 auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
660 Dst.addDefToMIB(*getMRI(), Extract);
661 Src.addSrcToMIB(Extract);
662 Extract.addImm(Index);
663 return Extract;
664}
665
667 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
668}
669
672 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
673 // we need some temporary storage for the DstOp objects. Here we use a
674 // sufficiently large SmallVector to not go through the heap.
676 assert(TmpVec.size() > 1);
677 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
678}
679
683 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
684 // we need some temporary storage for the DstOp objects. Here we use a
685 // sufficiently large SmallVector to not go through the heap.
687 assert(TmpVec.size() > 1);
688 return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
689}
690
693 std::initializer_list<SrcOp> Ops) {
694 assert(Ops.size() > 1);
695 return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
696}
697
698unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
699 ArrayRef<SrcOp> SrcOps) const {
700 if (DstOp.getLLTTy(*getMRI()).isVector()) {
701 if (SrcOps[0].getLLTTy(*getMRI()).isVector())
702 return TargetOpcode::G_CONCAT_VECTORS;
703 return TargetOpcode::G_BUILD_VECTOR;
704 }
705
706 return TargetOpcode::G_MERGE_VALUES;
707}
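// Illustrative sketch (hypothetical helper): splitting an s64 into two s32
// halves and merging them back. With a scalar destination,
// buildMergeLikeInstr selects G_MERGE_VALUES; vector destinations get
// G_BUILD_VECTOR or G_CONCAT_VECTORS as decided by getOpcodeForMerge above.
static Register exampleSplitAndRejoin(MachineIRBuilder &B, Register Val64) {
  auto Halves = B.buildUnmerge(LLT::scalar(32), Val64); // two s32 defs
  SmallVector<Register, 2> Parts = {Halves.getReg(0), Halves.getReg(1)};
  return B.buildMergeLikeInstr(LLT::scalar(64), Parts).getReg(0);
}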
708
710 const SrcOp &Op) {
711 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
712 // we need some temporary storage for the DstOp objects. Here we use a
713 // sufficiently large SmallVector to not go through the heap.
714 SmallVector<DstOp, 8> TmpVec(Res);
715 assert(TmpVec.size() > 1);
716 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
717}
718
720 const SrcOp &Op) {
721 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
722 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
723 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
724}
725
728 const SrcOp &Op) {
729 LLT OpTy = Op.getLLTTy(*getMRI());
730 unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
731 SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
732 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
733}
734
736 const SrcOp &Op) {
737 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
738 // we need some temporary storage for the DstOp objects. Here we use a
739 // sufficiently large SmallVector to not go through the heap.
740 SmallVector<DstOp, 8> TmpVec(Res);
741 assert(TmpVec.size() > 1);
742 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
743}
744
747 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
748 // we need some temporary storage for the DstOp objects. Here we use a
749 // sufficiently large SmallVector to not go through the heap.
751 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
752}
753
757 SmallVector<SrcOp> TmpVec;
758 TmpVec.reserve(Ops.size());
759 LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
760 for (const auto &Op : Ops)
761 TmpVec.push_back(buildConstant(EltTy, Op));
762 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
763}
764
766 const SrcOp &Src) {
768 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
769}
770
774 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
775 // we need some temporary storage for the DstOp objects. Here we use a
776 // sufficiently large SmallVector to not go through the heap.
778 if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
779 Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
780 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
781 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
782}
783
785 const SrcOp &Src) {
786 LLT DstTy = Res.getLLTTy(*getMRI());
787 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
788 "Expected Src to match Dst elt ty");
789 auto UndefVec = buildUndef(DstTy);
790 auto Zero = buildConstant(LLT::integer(64), 0);
791 auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
792 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
793 return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
794}
795
797 const SrcOp &Src) {
798 assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
799 "Expected Src to match Dst elt ty");
800 return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
801}
802
804 const SrcOp &Src1,
805 const SrcOp &Src2,
806 ArrayRef<int> Mask) {
807 LLT DstTy = Res.getLLTTy(*getMRI());
808 LLT Src1Ty = Src1.getLLTTy(*getMRI());
809 LLT Src2Ty = Src2.getLLTTy(*getMRI());
810 const LLT DstElemTy = DstTy.getScalarType();
811 const LLT ElemTy1 = Src1Ty.getScalarType();
812 const LLT ElemTy2 = Src2Ty.getScalarType();
813 assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
814 assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR are not supported");
815 (void)DstElemTy;
816 (void)ElemTy1;
817 (void)ElemTy2;
818 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
819 return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
820 .addShuffleMask(MaskAlloc);
821}
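// Illustrative sketch (hypothetical helper): broadcasting lane 0 of a
// <4 x s32> vector with an all-zero shuffle mask.
static Register exampleBroadcastLane0(MachineIRBuilder &B, Register Vec) {
  LLT V4S32 = LLT::fixed_vector(4, 32);
  SmallVector<int, 4> Mask(4, 0); // every result lane reads source lane 0
  return B.buildShuffleVector(V4S32, Vec, Vec, Mask).getReg(0);
}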
822
825 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
826 // we need some temporary storage for the DstOp objects. Here we use a
827 // sufficiently large SmallVector to not go through the heap.
829 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
830}
831
833 const SrcOp &Src,
834 const SrcOp &Op,
835 unsigned Index) {
836 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
837 Res.getLLTTy(*getMRI()).getSizeInBits() &&
838 "insertion past the end of a register");
839
840 if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
841 Op.getLLTTy(*getMRI()).getSizeInBits()) {
842 return buildCast(Res, Op);
843 }
844
845 return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
846}
847
849 unsigned Step) {
850 unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
851 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
852 APInt(Bitwidth, Step));
853 auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
854 StepVector->setDebugLoc(DebugLoc());
855 Res.addDefToMIB(*getMRI(), StepVector);
856 StepVector.addCImm(CI);
857 return StepVector;
858}
859
861 unsigned MinElts) {
862
863 auto IntN = IntegerType::get(getMF().getFunction().getContext(),
864 Res.getLLTTy(*getMRI()).getScalarSizeInBits());
865 ConstantInt *CI = ConstantInt::get(IntN, MinElts);
866 return buildVScale(Res, *CI);
867}
868
870 const ConstantInt &MinElts) {
871 auto VScale = buildInstr(TargetOpcode::G_VSCALE);
872 VScale->setDebugLoc(DebugLoc());
873 Res.addDefToMIB(*getMRI(), VScale);
874 VScale.addCImm(&MinElts);
875 return VScale;
876}
877
879 const APInt &MinElts) {
880 ConstantInt *CI =
881 ConstantInt::get(getMF().getFunction().getContext(), MinElts);
882 return buildVScale(Res, *CI);
883}
884
885static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
886 if (HasSideEffects && IsConvergent)
887 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
888 if (HasSideEffects)
889 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
890 if (IsConvergent)
891 return TargetOpcode::G_INTRINSIC_CONVERGENT;
892 return TargetOpcode::G_INTRINSIC;
893}
894
897 ArrayRef<Register> ResultRegs,
898 bool HasSideEffects, bool isConvergent) {
899 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
900 for (Register ResultReg : ResultRegs)
901 MIB.addDef(ResultReg);
902 MIB.addIntrinsicID(ID);
903 return MIB;
904}
905
908 ArrayRef<Register> ResultRegs) {
910 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
911 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
912 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
913}
914
917 bool HasSideEffects,
918 bool isConvergent) {
919 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
920 for (DstOp Result : Results)
921 Result.addDefToMIB(*getMRI(), MIB);
922 MIB.addIntrinsicID(ID);
923 return MIB;
924}
925
929 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
930 bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
931 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
932}
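// Illustrative sketch (hypothetical helper): emitting a side-effect-free,
// non-convergent intrinsic with one s32 result. Source operands are appended
// after the intrinsic ID via the returned MachineInstrBuilder.
static Register exampleIntrinsic(MachineIRBuilder &B, Intrinsic::ID ID,
                                 Register Src) {
  auto MIB = B.buildIntrinsic(ID, {LLT::scalar(32)},
                              /*HasSideEffects=*/false,
                              /*isConvergent=*/false);
  MIB.addUse(Src);
  return MIB.getReg(0);
}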
933
936 std::optional<unsigned> Flags) {
937 return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
938}
939
942 std::optional<unsigned> Flags) {
943 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
944}
945
947 const DstOp &Res,
948 const SrcOp &Op0,
949 const SrcOp &Op1,
950 std::optional<unsigned> Flags) {
951 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
952}
953
955 const DstOp &Res,
956 const SrcOp &Op0,
957 const SrcOp &Op1,
958 std::optional<unsigned> Flags) {
959
960 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
961}
962
964 const SrcOp &Op0,
965 const SrcOp &Op1) {
966 return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
967}
968
970 const SrcOp &Op0,
971 const SrcOp &Op1) {
972 return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
973}
974
977 const SrcOp &Op0, const SrcOp &Op1,
978 std::optional<unsigned> Flags) {
980 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
981}
982
984 const SrcOp &Src0,
985 const SrcOp &Src1,
986 unsigned Idx) {
987 return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
988 {Src0, Src1, uint64_t(Idx)});
989}
990
992 const SrcOp &Src,
993 unsigned Idx) {
994 return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
995 {Src, uint64_t(Idx)});
996}
997
1000 const SrcOp &Elt, const SrcOp &Idx) {
1001 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
1002}
1003
1006 const SrcOp &Idx) {
1007 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
1008}
1009
1011 const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
1012 const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
1013#ifndef NDEBUG
1014 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1015 LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
1016 LLT AddrTy = Addr.getLLTTy(*getMRI());
1017 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1018 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1019 assert(OldValResTy.isScalar() && "invalid operand type");
1020 assert(SuccessResTy.isScalar() && "invalid operand type");
1021 assert(AddrTy.isPointer() && "invalid operand type");
1022 assert(CmpValTy.isValid() && "invalid operand type");
1023 assert(NewValTy.isValid() && "invalid operand type");
1024 assert(OldValResTy == CmpValTy && "type mismatch");
1025 assert(OldValResTy == NewValTy && "type mismatch");
1026#endif
1027
1028 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
1029 OldValRes.addDefToMIB(*getMRI(), MIB);
1030 SuccessRes.addDefToMIB(*getMRI(), MIB);
1031 Addr.addSrcToMIB(MIB);
1032 CmpVal.addSrcToMIB(MIB);
1033 NewVal.addSrcToMIB(MIB);
1034 MIB.addMemOperand(&MMO);
1035 return MIB;
1036}
1037
1040 const SrcOp &CmpVal, const SrcOp &NewVal,
1041 MachineMemOperand &MMO) {
1042#ifndef NDEBUG
1043 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1044 LLT AddrTy = Addr.getLLTTy(*getMRI());
1045 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
1046 LLT NewValTy = NewVal.getLLTTy(*getMRI());
1047 assert(OldValResTy.isScalar() && "invalid operand type");
1048 assert(AddrTy.isPointer() && "invalid operand type");
1049 assert(CmpValTy.isValid() && "invalid operand type");
1050 assert(NewValTy.isValid() && "invalid operand type");
1051 assert(OldValResTy == CmpValTy && "type mismatch");
1052 assert(OldValResTy == NewValTy && "type mismatch");
1053#endif
1054
1055 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
1056 OldValRes.addDefToMIB(*getMRI(), MIB);
1057 Addr.addSrcToMIB(MIB);
1058 CmpVal.addSrcToMIB(MIB);
1059 NewVal.addSrcToMIB(MIB);
1060 MIB.addMemOperand(&MMO);
1061 return MIB;
1062}
1063
1065 unsigned Opcode, const DstOp &OldValRes,
1066 const SrcOp &Addr, const SrcOp &Val,
1067 MachineMemOperand &MMO) {
1068
1069#ifndef NDEBUG
1070 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
1071 LLT AddrTy = Addr.getLLTTy(*getMRI());
1072 LLT ValTy = Val.getLLTTy(*getMRI());
1073 assert(AddrTy.isPointer() && "invalid operand type");
1074 assert(ValTy.isValid() && "invalid operand type");
1075 assert(OldValResTy == ValTy && "type mismatch");
1076 assert(MMO.isAtomic() && "not atomic mem operand");
1077#endif
1078
1079 auto MIB = buildInstr(Opcode);
1080 OldValRes.addDefToMIB(*getMRI(), MIB);
1081 Addr.addSrcToMIB(MIB);
1082 Val.addSrcToMIB(MIB);
1083 MIB.addMemOperand(&MMO);
1084 return MIB;
1085}
1086
1089 Register Val, MachineMemOperand &MMO) {
1090 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1091 MMO);
1092}
1095 Register Val, MachineMemOperand &MMO) {
1096 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1097 MMO);
1098}
1101 Register Val, MachineMemOperand &MMO) {
1102 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1103 MMO);
1104}
1107 Register Val, MachineMemOperand &MMO) {
1108 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1109 MMO);
1110}
1113 Register Val, MachineMemOperand &MMO) {
1114 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1115 MMO);
1116}
1118 Register Addr,
1119 Register Val,
1120 MachineMemOperand &MMO) {
1121 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1122 MMO);
1123}
1126 Register Val, MachineMemOperand &MMO) {
1127 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1128 MMO);
1129}
1132 Register Val, MachineMemOperand &MMO) {
1133 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1134 MMO);
1135}
1138 Register Val, MachineMemOperand &MMO) {
1139 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1140 MMO);
1141}
1144 Register Val, MachineMemOperand &MMO) {
1145 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1146 MMO);
1147}
1150 Register Val, MachineMemOperand &MMO) {
1151 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1152 MMO);
1153}
1154
1157 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1158 MachineMemOperand &MMO) {
1159 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1160 MMO);
1161}
1162
1164MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1165 MachineMemOperand &MMO) {
1166 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1167 MMO);
1168}
1169
1172 const SrcOp &Val, MachineMemOperand &MMO) {
1173 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1174 MMO);
1175}
1176
1179 const SrcOp &Val, MachineMemOperand &MMO) {
1180 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1181 MMO);
1182}
1183
1186 const SrcOp &Addr, const SrcOp &Val,
1187 MachineMemOperand &MMO) {
1188 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
1189 Val, MMO);
1190}
1191
1194 const SrcOp &Addr, const SrcOp &Val,
1195 MachineMemOperand &MMO) {
1196 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
1197 Val, MMO);
1198}
1199
1201MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1202 return buildInstr(TargetOpcode::G_FENCE)
1203 .addImm(Ordering)
1204 .addImm(Scope);
1205}
1206
1208 unsigned RW,
1209 unsigned Locality,
1210 unsigned CacheType,
1211 MachineMemOperand &MMO) {
1212 auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
1213 Addr.addSrcToMIB(MIB);
1214 MIB.addImm(RW).addImm(Locality).addImm(CacheType);
1215 MIB.addMemOperand(&MMO);
1216 return MIB;
1217}
1218
1221#ifndef NDEBUG
1222 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1223#endif
1224
1225 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
1226}
1227
1228void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1229 bool IsExtend) {
1230#ifndef NDEBUG
1231 if (DstTy.isVector()) {
1232 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1233 assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
1234 "different number of elements in a trunc/ext");
1235 } else
1236 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1237
1238 if (IsExtend)
1239 assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1240 "invalid narrowing extend");
1241 else
1242 assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1243 "invalid widening trunc");
1244#endif
1245}
1246
1247void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1248 const LLT Op0Ty, const LLT Op1Ty) {
1249#ifndef NDEBUG
1250 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1251 "invalid operand type");
1252 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1253 if (ResTy.isScalar() || ResTy.isPointer())
1254 assert(TstTy.isScalar() && "type mismatch");
1255 else
1256 assert((TstTy.isScalar() ||
1257 (TstTy.isVector() &&
1258 TstTy.getElementCount() == Op0Ty.getElementCount())) &&
1259 "type mismatch");
1260#endif
1261}
1262
1265 ArrayRef<SrcOp> SrcOps,
1266 std::optional<unsigned> Flags) {
1267 switch (Opc) {
1268 default:
1269 break;
1270 case TargetOpcode::G_SELECT: {
1271 assert(DstOps.size() == 1 && "Invalid select");
1272 assert(SrcOps.size() == 3 && "Invalid select");
1273 validateSelectOp(
1274 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1275 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1276 break;
1277 }
1278 case TargetOpcode::G_FNEG:
1279 case TargetOpcode::G_ABS:
1280 // All these are unary ops.
1281 assert(DstOps.size() == 1 && "Invalid Dst");
1282 assert(SrcOps.size() == 1 && "Invalid Srcs");
1283 validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1284 SrcOps[0].getLLTTy(*getMRI()));
1285 break;
1286 case TargetOpcode::G_ADD:
1287 case TargetOpcode::G_AND:
1288 case TargetOpcode::G_MUL:
1289 case TargetOpcode::G_OR:
1290 case TargetOpcode::G_SUB:
1291 case TargetOpcode::G_XOR:
1292 case TargetOpcode::G_UDIV:
1293 case TargetOpcode::G_SDIV:
1294 case TargetOpcode::G_UREM:
1295 case TargetOpcode::G_SREM:
1296 case TargetOpcode::G_SMIN:
1297 case TargetOpcode::G_SMAX:
1298 case TargetOpcode::G_UMIN:
1299 case TargetOpcode::G_UMAX:
1300 case TargetOpcode::G_UADDSAT:
1301 case TargetOpcode::G_SADDSAT:
1302 case TargetOpcode::G_USUBSAT:
1303 case TargetOpcode::G_SSUBSAT: {
1304 // All these are binary ops.
1305 assert(DstOps.size() == 1 && "Invalid Dst");
1306 assert(SrcOps.size() == 2 && "Invalid Srcs");
1307 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1308 SrcOps[0].getLLTTy(*getMRI()),
1309 SrcOps[1].getLLTTy(*getMRI()));
1310 break;
1311 }
1312 case TargetOpcode::G_SHL:
1313 case TargetOpcode::G_ASHR:
1314 case TargetOpcode::G_LSHR:
1315 case TargetOpcode::G_USHLSAT:
1316 case TargetOpcode::G_SSHLSAT: {
1317 assert(DstOps.size() == 1 && "Invalid Dst");
1318 assert(SrcOps.size() == 2 && "Invalid Srcs");
1319 validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1320 SrcOps[0].getLLTTy(*getMRI()),
1321 SrcOps[1].getLLTTy(*getMRI()));
1322 break;
1323 }
1324 case TargetOpcode::G_SEXT:
1325 case TargetOpcode::G_ZEXT:
1326 case TargetOpcode::G_ANYEXT:
1327 assert(DstOps.size() == 1 && "Invalid Dst");
1328 assert(SrcOps.size() == 1 && "Invalid Srcs");
1329 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1330 SrcOps[0].getLLTTy(*getMRI()), true);
1331 break;
1332 case TargetOpcode::G_TRUNC:
1333 case TargetOpcode::G_FPTRUNC: {
1334 assert(DstOps.size() == 1 && "Invalid Dst");
1335 assert(SrcOps.size() == 1 && "Invalid Srcs");
1336 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1337 SrcOps[0].getLLTTy(*getMRI()), false);
1338 break;
1339 }
1340 case TargetOpcode::G_BITCAST: {
1341 assert(DstOps.size() == 1 && "Invalid Dst");
1342 assert(SrcOps.size() == 1 && "Invalid Srcs");
1343 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1344 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1345 break;
1346 }
1347 case TargetOpcode::COPY:
1348 assert(DstOps.size() == 1 && "Invalid Dst");
1349 // If the caller wants to add a subreg source it has to be done separately
1350 // so we may not have any SrcOps at this point yet.
1351 break;
1352 case TargetOpcode::G_FCMP:
1353 case TargetOpcode::G_ICMP: {
1354 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1355 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1356 // For F/ICMP, the first src operand is the predicate, followed by
1357 // the two comparands.
1358 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1359 "Expecting predicate");
1360 assert([&]() -> bool {
1361 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1362 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1363 : CmpInst::isFPPredicate(Pred);
1364 }() && "Invalid predicate");
1365 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1366 "Type mismatch");
1367 assert([&]() -> bool {
1368 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1369 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1370 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1371 return DstTy.isScalar();
1372 else
1373 return DstTy.isVector() &&
1374 DstTy.getElementCount() == Op0Ty.getElementCount();
1375 }() && "Type Mismatch");
1376 break;
1377 }
1378 case TargetOpcode::G_UNMERGE_VALUES: {
1379 assert(!DstOps.empty() && "Invalid trivial sequence");
1380 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1381 assert(llvm::all_of(DstOps,
1382 [&, this](const DstOp &Op) {
1383 return Op.getLLTTy(*getMRI()) ==
1384 DstOps[0].getLLTTy(*getMRI());
1385 }) &&
1386 "type mismatch in output list");
1387 assert((TypeSize::ScalarTy)DstOps.size() *
1388 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1389 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1390 "input operands do not cover output register");
1391 break;
1392 }
1393 case TargetOpcode::G_MERGE_VALUES: {
1394 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1395 assert(DstOps.size() == 1 && "Invalid Dst");
1396 assert(llvm::all_of(SrcOps,
1397 [&, this](const SrcOp &Op) {
1398 return Op.getLLTTy(*getMRI()) ==
1399 SrcOps[0].getLLTTy(*getMRI());
1400 }) &&
1401 "type mismatch in input list");
1402 assert((TypeSize::ScalarTy)SrcOps.size() *
1403 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1404 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1405 "input operands do not cover output register");
1406 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1407 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1408 break;
1409 }
1410 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1411 assert(DstOps.size() == 1 && "Invalid Dst size");
1412 assert(SrcOps.size() == 2 && "Invalid Src size");
1413 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1414 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1415 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1416 "Invalid operand type");
1417 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1418 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1419 DstOps[0].getLLTTy(*getMRI()) &&
1420 "Type mismatch");
1421 break;
1422 }
1423 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1424 assert(DstOps.size() == 1 && "Invalid dst size");
1425 assert(SrcOps.size() == 3 && "Invalid src size");
1426 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1427 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1428 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1429 SrcOps[1].getLLTTy(*getMRI()) &&
1430 "Type mismatch");
1431 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1432 assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
1433 SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
1434 "Type mismatch");
1435 break;
1436 }
1437 case TargetOpcode::G_BUILD_VECTOR: {
1438 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1439 "Must have at least 2 operands");
1440 assert(DstOps.size() == 1 && "Invalid DstOps");
1441 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1442 "Res type must be a vector");
1443 assert(llvm::all_of(SrcOps,
1444 [&, this](const SrcOp &Op) {
1445 return Op.getLLTTy(*getMRI()) ==
1446 SrcOps[0].getLLTTy(*getMRI());
1447 }) &&
1448 "type mismatch in input list");
1449 assert((TypeSize::ScalarTy)SrcOps.size() *
1450 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1451 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1452 "input scalars do not exactly cover the output vector register");
1453 break;
1454 }
1455 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1456 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1457 "Must have at least 2 operands");
1458 assert(DstOps.size() == 1 && "Invalid DstOps");
1459 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1460 "Res type must be a vector");
1461 assert(llvm::all_of(SrcOps,
1462 [&, this](const SrcOp &Op) {
1463 return Op.getLLTTy(*getMRI()) ==
1464 SrcOps[0].getLLTTy(*getMRI());
1465 }) &&
1466 "type mismatch in input list");
1467 break;
1468 }
1469 case TargetOpcode::G_CONCAT_VECTORS: {
1470 assert(DstOps.size() == 1 && "Invalid DstOps");
1471 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1472 "Must have at least 2 operands");
1473 assert(llvm::all_of(SrcOps,
1474 [&, this](const SrcOp &Op) {
1475 return (Op.getLLTTy(*getMRI()).isVector() &&
1476 Op.getLLTTy(*getMRI()) ==
1477 SrcOps[0].getLLTTy(*getMRI()));
1478 }) &&
1479 "type mismatch in input list");
1480 assert((TypeSize::ScalarTy)SrcOps.size() *
1481 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1482 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1483 "input vectors do not exactly cover the output vector register");
1484 break;
1485 }
1486 case TargetOpcode::G_UADDE: {
1487 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1488 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1489 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1490 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1491 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1492 "Invalid operand");
1493 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1494 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1495 "type mismatch");
1496 break;
1497 }
1498 }
1499
1500 auto MIB = buildInstr(Opc);
1501 for (const DstOp &Op : DstOps)
1502 Op.addDefToMIB(*getMRI(), MIB);
1503 for (const SrcOp &Op : SrcOps)
1504 Op.addSrcToMIB(MIB);
1505 if (Flags)
1506 MIB->setFlags(*Flags);
1507 return MIB;
1508}
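// Illustrative sketch (hypothetical helper): most clients use the typed
// wrappers, but the generic entry point above takes any opcode plus
// DstOp/SrcOp lists and optional MachineInstr flags, running the per-opcode
// validation in the switch.
static Register exampleRawBuildInstr(MachineIRBuilder &B, Register LHS,
                                     Register RHS) {
  return B
      .buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)}, {LHS, RHS},
                  MachineInstr::MIFlag::NoSWrap)
      .getReg(0);
}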
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Function Alias Analysis Results
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:278
const fltSemantics & getSemantics() const
Definition APFloat.h:1524
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:407
The address of a basic block.
Definition Constants.h:1065
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:420
const APFloat & getValueAPF() const
Definition Constants.h:463
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:135
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
A signed pointer, in the ptrauth sense.
Definition Constants.h:1198
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1229
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1232
This is an important base class in LLVM.
Definition Constant.h:43
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:358
PointerType * getType() const
Global values are always pointers.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:354
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
LLT getScalarType() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr ElementCount getElementCount() const
constexpr bool isPointerOrPointerVector() const
static LLT integer(unsigned SizeInBits)
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
Metadata node.
Definition Metadata.h:1080
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWFMaximum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAXIMUM Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value, std::optional< unsigned > Flags=std::nullopt)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction specifying that Variable is given by C (suitably modified by Expr).
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VECTORS Op0, ...
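A sketch of this merge-like helper picking the opcode from the destination type (assumptions: B is a positioned MachineIRBuilder, Lo and Hi are s32 virtual registers):
  // Scalar destination of twice the source width: emits G_MERGE_VALUES.
  auto Wide = B.buildMergeLikeInstr(LLT::scalar(64), {Lo, Hi});
  // A vector destination with scalar sources would instead emit G_BUILD_VECTOR,
  // and vector sources feeding a wider vector would emit G_CONCAT_VECTORS.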
MachineInstrBuilder buildAtomicRMWFMinimum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMINIMUM Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
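A rough addressing sketch combining the pointer-add and load builders (assumed names: B positioned in MF, Base a p0 pointer register; the 16-byte offset, alignment, and empty MachinePointerInfo are placeholders):
  LLT P0 = LLT::pointer(0, 64);
  LLT S64 = LLT::scalar(64);
  auto Off = B.buildConstant(S64, 16);                // byte offset as G_CONSTANT
  auto Ptr = B.buildPtrAdd(P0, Base, Off);            // Ptr = G_PTR_ADD Base, 16
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOLoad, LLT::scalar(32), Align(4));
  auto Val = B.buildLoad(LLT::scalar(32), Ptr, *MMO); // Val = G_LOAD Ptr, MMO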
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0; Res = G_BUILD_VECTOR a, b, ..., x, undef, undef, undef.
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
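A sketch of the debug-value builders (assumptions: B's current debug location is valid for the variable, Var is a DILocalVariable*, Expr a DIExpression*, and Reg a virtual register):
  B.buildDirectDbgValue(Reg, Var, Expr);   // the variable's value is in Reg
  B.buildIndirectDbgValue(Reg, Var, Expr); // the variable lives in memory addressed by Reg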
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
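A sketch of element access on a <4 x s32> vector register Vec (B, Vec, NewElt, and the constant index are all illustrative assumptions; the index type is a target-dependent choice):
  LLT S32 = LLT::scalar(32);
  auto Idx = B.buildConstant(LLT::scalar(64), 0);
  auto Elt = B.buildExtractVectorElement(S32, Vec, Idx); // Elt = Vec[0]
  auto NewVec = B.buildInsertVectorElement(LLT::fixed_vector(4, 32), Vec, NewElt, Idx); // Vec with lane 0 replaced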
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0; Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
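A minimal constant-materialization sketch (assuming a positioned MachineIRBuilder B; besides the ConstantInt/ConstantFP overloads shown in this index, MachineIRBuilder also provides int64_t/double convenience overloads, which this sketch uses):
  auto C32 = B.buildConstant(LLT::scalar(32), 42);   // Res = G_CONSTANT i32 42
  auto F64 = B.buildFConstant(LLT::scalar(64), 1.0); // Res = G_FCONSTANT double 1.0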
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
@ Undef
Value of the register doesn't matter.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
DWARFExpression::Operation Op
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:658
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR.
All attributes (register class or bank and low-level type) a virtual register can have.