//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
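
// Illustrative usage (a sketch, not code from this file): a client typically
// binds function-wide state first and then picks an insertion point; setMBB
// and setInsertPt are declared in MachineIRBuilder.h:
//   MachineIRBuilder B;
//   B.setMF(MF);
//   B.setMBB(MBB);
//   B.setInsertPt(MBB, MBB.begin());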

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
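
// Illustrative note: given IR such as
//   call void @llvm.dbg.value(metadata ptr inttoptr (i64 1234 to ptr), ...)
// the lambda above peels the inttoptr, so the DBG_VALUE carries the plain
// immediate 1234 instead of a dropped ($noreg) location.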

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
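
// Illustrative use (a sketch; `B` is a positioned MachineIRBuilder): Res must
// be passed in invalid, and std::nullopt means no instruction was needed:
//   Register Res;
//   if (auto MIB = B.materializePtrAdd(Res, Base, LLT::scalar(64), Off))
//     ...; // Res is defined by the new G_PTR_ADD
//   else
//     ...; // Off was 0 and Res aliases Base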

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
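
// Example (illustrative): for NumBits == 4 and a p0 pointer this emits
//   %mask:_(s64) = G_CONSTANT i64 -16   ; maskTrailingZeros<uint64_t>(4)
//   %res:_(p0) = G_PTRMASK %op0, %mask
// i.e. it aligns the pointer down by clearing its four low bits.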

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
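
// Example (illustrative): with Res = <4 x s32> and Op0 = <2 x s32> this emits
//   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %op0(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %res:_(<4 x s32>) = G_BUILD_VECTOR %a, %b, %u, %u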

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}
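
// Example (illustrative): with Res = <2 x s32> and Op0 = <4 x s32> this emits
//   %a, %b, %c, %d = G_UNMERGE_VALUES %op0(<4 x s32>)
//   %res:_(<2 x s32>) = G_BUILD_VECTOR %a, %b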

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
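
// Example (illustrative): building -1 into a <2 x s32> destination first
// emits a scalar constant and then splats it:
//   %c:_(s32) = G_CONSTANT i32 -1
//   %res:_(<2 x s32>) = G_BUILD_VECTOR %c, %c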

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
             == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
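
// Example (illustrative; MIR memory-operand syntax approximate): loading an
// s32 at byte offset 8 from a p0 base materializes the address first:
//   %off:_(s64) = G_CONSTANT i64 8
//   %ptr:_(p0) = G_PTR_ADD %base, %off
//   %val:_(s32) = G_LOAD %ptr :: (load (s32))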

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
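
// Example (illustrative): with ExtOpc == G_SEXT, an s32 -> s64 request emits
// G_SEXT, an s32 -> s16 request emits G_TRUNC, and s32 -> s32 emits COPY.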

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
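
// Opcode choice at a glance (illustrative):
//   vector Dst, vector Srcs -> G_CONCAT_VECTORS
//   vector Dst, scalar Srcs -> G_BUILD_VECTOR
//   scalar Dst              -> G_MERGE_VALUES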

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
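
// Example (illustrative): splatting scalar %x into <4 x s32> emits
//   %undef:_(<4 x s32>) = G_IMPLICIT_DEF
//   %zero:_(s64) = G_CONSTANT i64 0
//   %ins:_(<4 x s32>) = G_INSERT_VECTOR_ELT %undef, %x, %zero
//   %res:_(<4 x s32>) = G_SHUFFLE_VECTOR %ins, %undef, shufflemask(0,0,0,0)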

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res,
                                     ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
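
// Illustrative note: the typed wrappers below all funnel into this helper;
// e.g. buildAtomicRMWAdd(OldVal, Addr, Val, MMO) is shorthand for
//   buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldVal, Addr, Val, MMO);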

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWOr(Register OldValRes, Register Addr,
                                   Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid number of dst operands");
    assert(SrcOps.size() == 3 && "Invalid number of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
Function Alias Analysis Results
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
const fltSemantics & getSemantics() const
Definition: APFloat.h:1301
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
The address of a basic block.
Definition: Constants.h:874
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:711
bool isFPPredicate() const
Definition: InstrTypes.h:818
bool isIntPredicate() const
Definition: InstrTypes.h:819
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:260
const APFloat & getValueAPF() const
Definition: Constants.h:296
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
Definition: Constants.cpp:927
This is the shared class of boolean and integer constants.
Definition: Constants.h:78
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:888
unsigned getBitWidth() const
getBitWidth - Return the bitwidth of this constant.
Definition: Constants.h:139
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:320
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:290
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:279
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:249
constexpr bool isScalar() const
Definition: LowLevelType.h:139
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:137
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:149
constexpr bool isVector() const
Definition: LowLevelType.h:145
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:175
constexpr bool isPointer() const
Definition: LowLevelType.h:141
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:272
constexpr LLT getScalarType() const
Definition: LowLevelType.h:190
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Metadata node.
Definition: Metadata.h:950
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr,...
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr, Register CmpVal, Register NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instructions specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in th...
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op.
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0; Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
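A sketch, with Src1 and Src2 assumed to be already-defined <4 x s32> values:
  // Mask entries index into the concatenation of Src1 and Src2; -1 means undef.
  int Mask[] = {0, 4, 1, 5}; // interleave the low halves
  auto Zip = MIRBuilder.buildShuffleVector(LLT::fixed_vector(4, 32),
                                           Src1, Src2, Mask);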
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT ~((1 << NumBits) - 1), i.e. clear the low NumBits of the pointer Op0.
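For example, with Ptr assumed to be an already-defined p0 value:
  // Align a pointer down to a 16-byte boundary by clearing its low 4 bits.
  auto Aligned = MIRBuilder.buildMaskLowPtrBits(LLT::pointer(0, 64), Ptr, 4);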
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred Op0, Op1.
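A sketch, with X and Y assumed to be floating-point values of matching type:
  // %lt:_(s1) = G_FCMP floatpred(olt), %x, %y
  auto Lt = MIRBuilder.buildFCmp(CmpInst::FCMP_OLT, LLT::scalar(1), X, Y);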
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
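A sketch, assuming Val is an s32 value:
  // Sign-extend from the low 8 bits; arithmetically (Val << 24) >> 24.
  auto Byte = MIRBuilder.buildSExtInReg(LLT::scalar(32), Val, 8);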
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
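These operand helpers are the raw MachineInstrBuilder interface that MachineIRBuilder layers its type handling on top of. A sketch with assumed MBB, DL, TII, and registers:
  // %dst = G_ADD %lhs, %rhs, built directly via BuildMI.
  BuildMI(MBB, MBB.end(), DL, TII->get(TargetOpcode::G_ADD))
      .addDef(DstReg)
      .addUse(LHSReg)
      .addUse(RHSReg);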
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
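A sketch, assuming MRI and a positioned MIRBuilder:
  // Create a fresh generic s64 vreg, then give it a definition.
  Register V = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildConstant(V, 7); // %v:_(s64) = G_CONSTANT i64 7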
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:690
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:667
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:74
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1727
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
DWARFExpression::Operation Op
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:485
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:651
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
MachineFunction * MF
MachineFunction under construction.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.