//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
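
// Illustrative usage (a hedged sketch, not part of the original file): a
// GlobalISel pass typically binds the builder to a function and then to an
// insertion point before emitting instructions. MF, MBB, X and Y below are
// hypothetical values supplied by the caller.
//
//   MachineIRBuilder B;
//   B.setMF(MF);                      // reset all builder state, as above
//   B.setInsertPt(MBB, MBB.begin());  // choose block and insertion point
//   auto Sum = B.buildAdd(LLT::scalar(32), X, Y);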

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else if (CI->getBitWidth() == 1)
      MIB.addImm(CI->getZExtValue());
    else
      MIB.addImm(CI->getSExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildObjectPtrOffset(const DstOp &Res,
                                                           const SrcOp &Op0,
                                                           const SrcOp &Op1) {
  return buildPtrAdd(Res, Op0, Op1,
                     MachineInstr::MIFlag::NoUWrap |
                         MachineInstr::MIFlag::NoUSWrap);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value,
                                    std::optional<unsigned> Flags) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0), Flags);
}

std::optional<MachineInstrBuilder> MachineIRBuilder::materializeObjectPtrOffset(
    Register &Res, Register Op0, const LLT ValueTy, uint64_t Value) {
  return materializePtrAdd(Res, Op0, ValueTy, Value,
                           MachineInstr::MIFlag::NoUWrap |
                               MachineInstr::MIFlag::NoUSWrap);
}
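
// A hedged usage sketch (caller-side, not from this file): because Res is an
// out-parameter, a zero offset reuses Op0 and emits nothing, so callers must
// check the returned optional before touching the instruction. Base and Off
// are hypothetical.
//
//   Register OutReg; // must be invalid (0) on entry
//   if (auto MIB = B.materializePtrAdd(OutReg, Base, LLT::scalar(64), Off))
//     (*MIB)->setFlag(MachineInstr::NoUWrap); // a G_PTR_ADD was created
//   // otherwise OutReg == Base and no instruction was inserted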

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
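
// For example (assumed types, not from the original file), widening
// %src:_(<2 x s32>) into <4 x s32> produces MIR along these lines:
//
//   %e0:_(s32), %e1:_(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//   %undef:_(s32) = G_IMPLICIT_DEF
//   %dst:_(<4 x s32>) = G_BUILD_VECTOR %e0(s32), %e1(s32), %undef(s32), %undef(s32)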

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  assert(!isa<VectorType>(Val.getType()) && "Unexpected vector constant!");
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
             == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
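
// For example (a sketch with hypothetical WideTy/Cond values): a target that
// reports ZeroOrNegativeOneBooleanContent for vectors -- common for SIMD
// compare masks -- gets G_SEXT here, ZeroOrOneBooleanContent gets G_ZEXT, and
// anything else conservatively gets G_ANYEXT:
//
//   unsigned ExtOp = B.getBoolExtOp(/*IsVec=*/true, /*IsFP=*/false);
//   B.buildInstr(ExtOp, {WideTy}, {Cond});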

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
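
// For example (assumed LLTs, not from the original file):
//   s64        from {s32, s32}             -> G_MERGE_VALUES
//   <2 x s32>  from {s32, s32}             -> G_BUILD_VECTOR
//   <4 x s32>  from {<2 x s32>, <2 x s32>} -> G_CONCAT_VECTORS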

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(*getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}
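
// The two splat helpers above differ in the MIR they emit; a minimal sketch
// with assumed types and a hypothetical scalar %x:
//
//   buildShuffleSplat(<4 x s32>, %x) expands to the insert+shuffle idiom:
//     %i:_(<4 x s32>) = G_INSERT_VECTOR_ELT %undef, %x(s32), %zero(s64)
//     %d:_(<4 x s32>) = G_SHUFFLE_VECTOR %i, %undef, shufflemask(0,0,0,0)
//   buildSplatVector(<vscale x 4 x s32>, %x) emits a single instruction:
//     %d:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %x(s32)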

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.getScalarType();
  const LLT ElemTy1 = Src1Ty.getScalarType();
  const LLT ElemTy2 = Src2Ty.getScalarType();
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  assert(Mask.size() > 1 && "Scalar G_SHUFFLE_VECTOR are not supported");
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}
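
// For example, a convergent intrinsic that may write memory maps to
// G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS, while a pure, non-convergent one maps
// to plain G_INTRINSIC; the buildIntrinsic overloads below derive both flags
// from the intrinsic's attributes when they are not given explicitly.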

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (Register ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  AttributeSet Attrs = Intrinsic::getFnAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasAttribute(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op,
                                                 std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMaximum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAXIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMinimum(const DstOp &OldValRes,
                                         const SrcOp &Addr, const SrcOp &Val,
                                         MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMINIMUM, OldValRes, Addr,
                        Val, MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
Function Alias Analysis Results
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
Promote Memory to Register
Definition Mem2Reg.cpp:110
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static LLVM_ABI unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition APFloat.cpp:354
const fltSemantics & getSemantics() const
Definition APFloat.h:1439
Class for arbitrary precision integers.
Definition APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:307
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
This class holds the attributes for a particular argument, parameter, function, or return value.
Definition Attributes.h:361
The address of a basic block.
Definition Constants.h:904
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
bool isFPPredicate() const
Definition InstrTypes.h:782
bool isIntPredicate() const
Definition InstrTypes.h:783
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:282
const APFloat & getValueAPF() const
Definition Constants.h:325
This is the shared class of boolean and integer constants.
Definition Constants.h:87
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition Constants.h:162
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:168
A signed pointer, in the ptrauth sense.
Definition Constants.h:1037
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition Constants.h:1068
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition Constants.h:1071
This is an important base class in LLVM.
Definition Constant.h:43
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
PointerType * getType() const
Global values are always pointers.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:318
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
constexpr bool isPointerOrPointerVector() const
constexpr LLT getScalarType() const
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition MCInstrInfo.h:90
Metadata node.
Definition Metadata.h:1078
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWFMaximum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAXIMUM Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value, std::optional< unsigned > Flags=std::nullopt)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildObjectPtrOffset(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMWFMinimum(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMINIMUM Addr, Val, MMO.
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
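The load, pointer-arithmetic, and store builders compose naturally; a sketch that copies a 32-bit word from Src to Dst + 4 bytes, assuming 64-bit p0 pointers and that the caller provides memory operands describing the two accesses:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
static void copyWordAtOffset4(MachineIRBuilder &B, Register Dst, Register Src,
                              MachineMemOperand &LoadMMO,
                              MachineMemOperand &StoreMMO) {
  auto Val = B.buildLoad(LLT::scalar(32), Src, LoadMMO); // %v = G_LOAD %Src
  // G_PTR_ADD takes the byte offset as an integer register, so the
  // constant 4 is materialized first.
  auto Off = B.buildConstant(LLT::scalar(64), 4);
  auto Ptr = B.buildPtrAdd(LLT::pointer(0, 64), Dst, Off);
  B.buildStore(Val, Ptr, StoreMMO); // G_STORE %v, %p
}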
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x, undef, undef, ..., padding the result with undef elements.
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
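A sketch that materializes the address of a stack slot; the 64-bit address-space-0 pointer type is an assumption, as targets pick their own:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
// Emit %addr(p0) = G_FRAME_INDEX %stack.FI.
static Register frameSlotAddr(MachineIRBuilder &B, int FI) {
  return B.buildFrameIndex(LLT::pointer(0, 64), FI).getReg(0);
}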
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 followed by Res = G_BUILD_VECTOR a, b, ..., x, dropping the trailing elements.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT ~((1 << NumBits) - 1), i.e. clear the NumBits lowest bits of Op0.
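For example, aligning a pointer down to a 16-byte boundary by clearing its four low bits (a sketch; 64-bit p0 pointers are assumed):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
// Emit %res(p0) = G_PTRMASK %Ptr, %mask where %mask = G_CONSTANT -16,
// i.e. a constant with the four trailing bits clear.
static Register alignDown16(MachineIRBuilder &B, Register Ptr) {
  return B.buildMaskLowPtrBits(LLT::pointer(0, 64), Ptr, /*NumBits=*/4).getReg(0);
}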
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
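Besides this virtual overload taking a ConstantInt, MachineIRBuilder also provides buildConstant overloads taking a plain int64_t or an APInt; a sketch using the former:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
// Emit %c(s32) = G_CONSTANT i32 42 via the int64_t convenience overload.
static Register constant42(MachineIRBuilder &B) {
  return B.buildConstant(LLT::scalar(32), 42).getReg(0);
}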
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
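A sketch that sign-extends the low 8 bits of a 32-bit register in place (names are illustrative):
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
// Emit %res(s32) = G_SEXT_INREG %Val(s32), 8: bits above bit 7 are
// replaced with copies of bit 7.
static Register sextLowByte(MachineIRBuilder &B, Register Val) {
  return B.buildSExtInReg(LLT::scalar(32), Val, 8).getReg(0);
}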
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
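Creating the register explicitly, rather than handing an LLT to a build* method, is useful when the same vreg must be named before its defining instruction is built; a sketch:
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
// Create a fresh s32 generic vreg, then define it with a COPY.
static Register copyToFreshVReg(MachineIRBuilder &B, Register Src) {
  Register New = B.getMRI()->createGenericVirtualRegister(LLT::scalar(32));
  B.buildCopy(New, Src);
  return New;
}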
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void addMetadata(unsigned KindID, MDNode &MD)
Add a metadata attachment.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:216
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition TypeSize.h:223
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI AttributeSet getFnAttributes(LLVMContext &C, ID id)
Return the function attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
DWARFExpression::Operation Op
LLVM_ABI APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition Utils.cpp:658
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition Metadata.h:761
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
All attributes (register class or bank and low-level type) a virtual register can have.