MachineIRBuilder.cpp
//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&] () -> const Constant* {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
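
// Illustrative sketch (not from the original source): the IntToPtr unwrapping
// above means that IR such as
//
//   call void @llvm.dbg.value(metadata ptr inttoptr (i64 1024 to ptr), ...)
//
// records the underlying i64 1024, so the resulting DBG_VALUE carries a plain
// immediate operand instead of an opaque constant expression.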

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->cloneVirtualRegister(Op0);
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
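
// Illustrative sketch (not from the original source): materializePtrAdd lets
// callers fold away zero offsets. With a builder MIRBuilder and base register
// Base (both hypothetical):
//
//   Register NewPtr;
//   auto PtrAdd =
//       MIRBuilder.materializePtrAdd(NewPtr, Base, LLT::scalar(64), Offset);
//
// For Offset == 0 this returns std::nullopt and aliases NewPtr to Base, so no
// G_CONSTANT or G_PTR_ADD is emitted; otherwise both instructions are built.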

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
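
// Illustrative sketch (not from the original source): padding a <2 x s32>
// value out to <4 x s32> with this helper produces
//
//   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %dst:_(<4 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32), %u(s32), %u(s32)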

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
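
// Illustrative sketch (not from the original source): for a fixed-vector
// destination, the ConstantInt overload above splats the scalar G_CONSTANT,
// e.g. building the value 7 into LLT::fixed_vector(4, 32) yields
//
//   %c:_(s32) = G_CONSTANT i32 7
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c(s32), %c(s32), %c(s32), %c(s32)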

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
             == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
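
// Illustrative sketch (not from the original source): loading a field at byte
// offset 8 from a base pointer with buildLoadFromOffset expands to explicit
// offset arithmetic plus a load carrying the offset memory operand:
//
//   %off:_(s64) = G_CONSTANT i64 8
//   %addr:_(p0) = G_PTR_ADD %base(p0), %off(s64)
//   %val:_(s32) = G_LOAD %addr(p0) :: (load (s32) from %base + 8)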

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
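
// Illustrative sketch (not from the original source): buildExtOrTrunc picks
// the opcode purely from the relative sizes, so with G_ZEXT as ExtOpc:
//
//   s32 -> s64 : G_ZEXT
//   s64 -> s32 : G_TRUNC
//   s32 -> s32 : COPY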

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
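
// Illustrative sketch (not from the original source): the opcode chosen by
// getOpcodeForMerge, by destination and source shape:
//
//   <4 x s32> from two <2 x s32> : G_CONCAT_VECTORS
//   <4 x s32> from four s32      : G_BUILD_VECTOR
//   s64       from two s32       : G_MERGE_VALUES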

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildUnmerge(MachineRegisterInfo::VRegAttrs Attrs,
                               const SrcOp &Op) {
  LLT OpTy = Op.getLLTTy(*getMRI());
  unsigned NumRegs = OpTy.getSizeInBits() / Attrs.Ty.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumRegs, Attrs);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
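
// Illustrative sketch (not from the original source): for a <4 x s32>
// destination and a scalar %x, the splat idiom built above is
//
//   %undef:_(<4 x s32>) = G_IMPLICIT_DEF
//   %zero:_(s64) = G_CONSTANT i64 0
//   %ins:_(<4 x s32>) = G_INSERT_VECTOR_ELT %undef, %x(s32), %zero(s64)
//   %res:_(<4 x s32>) = G_SHUFFLE_VECTOR %ins(<4 x s32>), %undef,
//                       shufflemask(0, 0, 0, 0)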

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.isVector() ? DstTy.getElementType() : DstTy;
  const LLT ElemTy1 = Src1Ty.isVector() ? Src1Ty.getElementType() : Src1Ty;
  const LLT ElemTy2 = Src2Ty.isVector() ? Src2Ty.getElementType() : Src2Ty;
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {

  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}
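
// Illustrative sketch (not from the original source): for an intrinsic that
// neither touches memory nor is convergent, these overloads reduce to a plain
// G_INTRINSIC. With a hypothetical destination register DstReg:
//
//   auto MIB = MIRBuilder.buildIntrinsic(Intrinsic::fabs, {DstReg},
//                                        /*HasSideEffects=*/false,
//                                        /*isConvergent=*/false);
//
// The overloads without the two bool parameters derive them from the
// intrinsic's attributes, exactly as done above.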

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op,
                                                 std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
Function Alias Analysis Results
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:235
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
This file describes how to lower LLVM code to machine code.
const fltSemantics & getSemantics() const
Definition: APFloat.h:1453
Class for arbitrary precision integers.
Definition: APInt.h:78
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:306
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
The address of a basic block.
Definition: Constants.h:893
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:673
bool isFPPredicate() const
Definition: InstrTypes.h:780
bool isIntPredicate() const
Definition: InstrTypes.h:781
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:271
const APFloat & getValueAPF() const
Definition: Constants.h:314
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:151
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:157
A signed pointer, in the ptrauth sense.
Definition: Constants.h:1021
ConstantInt * getKey() const
The Key ID, an i32 constant.
Definition: Constants.h:1051
ConstantInt * getDiscriminator() const
The integer discriminator, an i64 constant, or 0.
Definition: Constants.h:1054
This is an important base class in LLVM.
Definition: Constant.h:42
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:295
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:311
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:181
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:264
constexpr bool isScalar() const
Definition: LowLevelType.h:146
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:190
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:277
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:183
constexpr bool isPointerOrPointerVector() const
Definition: LowLevelType.h:153
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:177
constexpr LLT getScalarType() const
Definition: LowLevelType.h:205
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Metadata node.
Definition: Metadata.h:1073
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildUCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_UCMP Op0, Op1.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildSCmp(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_SCMP Op0, Op1.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0; Res = G_BUILD_VECTOR a, b, ..., x, undef, undef, undef.
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Reg (suitably modified by Expr).
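A sketch, assuming DIVar and DIExpr (hypothetical names) are a DILocalVariable and DIExpression that are valid for the builder's current DebugLoc, as the asserts in the implementation require:

  // Record that the source-level variable DIVar now lives in Reg.
  MIRBuilder.buildDirectDbgValue(Reg, DIVar, DIExpr);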
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
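A sketch that materializes the constant vector <4 x s32> <0, 1, 2, 3>; MIRBuilder is assumed:

  // Each APInt element becomes a G_CONSTANT feeding one G_BUILD_VECTOR.
  SmallVector<APInt, 4> Elts;
  for (unsigned I = 0; I != 4; ++I)
    Elts.push_back(APInt(32, I));
  auto Vec = MIRBuilder.buildBuildVectorConstant(
      LLT::fixed_vector(4, LLT::scalar(32)), Elts);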
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
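A sketch of a 32-bit compare-and-swap; Addr, Expected, Desired, and MMO are assumed names, and the MMO must be atomic and carry both MOLoad and MOStore:

  auto CAS = MIRBuilder.buildAtomicCmpXchgWithSuccess(
      LLT::scalar(32), LLT::scalar(1), Addr, Expected, Desired, MMO);
  Register OldVal  = CAS.getReg(0); // previous memory contents
  Register Success = CAS.getReg(1); // s1: did the swap happen?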
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0; Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
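A sketch interleaving the low halves of two <4 x s32> vectors (Src1 and Src2 are assumed registers); mask indices 0-3 select from Src1 and 4-7 from Src2:

  int Mask[] = {0, 4, 1, 5};
  auto Shuf = MIRBuilder.buildShuffleVector(
      LLT::fixed_vector(4, 32), Src1, Src2, Mask);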
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildStepVector(const DstOp &Res, unsigned Step)
Build and insert Res = G_STEP_VECTOR Step.
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT ~((1 << NumBits) - 1), i.e. clear the NumBits lowest bits of Op0.
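A sketch of the common use, aligning a pointer downward (Ptr is an assumed p0 register; 64-bit pointers assumed):

  // Align Ptr down to a 16-byte boundary by clearing its Log2(16) = 4 low bits.
  auto Aligned = MIRBuilder.buildMaskLowPtrBits(
      LLT::pointer(0, 64), Ptr, Log2(Align(16)));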
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
MachineInstrBuilder buildConstantPtrAuth(const DstOp &Res, const ConstantPtrAuth *CPA, Register Addr, Register AddrDisc)
Build and insert G_PTRAUTH_GLOBAL_VALUE.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
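A sketch, assuming MRI and MIRBuilder are the MachineRegisterInfo and builder in scope; pre-creating the register is the alternative to letting a builder method invent one from an LLT DstOp:

  Register Tmp = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildCopy(Tmp, Src); // Src is an assumed s64 register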
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:703
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:78
void reserve(size_type N)
Definition: SmallVector.h:663
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:74
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:218
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:225
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1739
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
DWARFExpression::Operation Op
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:651
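A sketch pairing this helper with buildFConstant (MIRBuilder assumed):

  // Convert 1.0 to the APFloat semantics matching a 32-bit float,
  // then materialize it as G_FCONSTANT.
  APFloat One = getAPFloatFromSize(1.0, 32);
  auto C = MIRBuilder.buildFConstant(LLT::scalar(32), One);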
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:764
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:370
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
MachineFunction * MF
MachineFunction under construction.
MDNode * MMRA
MMRA Metadata to be set on any instruction we create.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
All attributes (register class or bank and low-level type) a virtual register can have.