1//==--- InstrEmitter.cpp - Emit MachineInstrs for the SelectionDAG class ---==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the Emit routines for the SelectionDAG class, which creates
10// MachineInstrs based on the decisions of the SelectionDAG instruction
11// selection.
12//
13//===----------------------------------------------------------------------===//
14
15#include "InstrEmitter.h"
16#include "SDNodeDbgValue.h"
27#include "llvm/IR/PseudoProbe.h"
30using namespace llvm;
31
32#define DEBUG_TYPE "instr-emitter"
33
34/// MinRCSize - Smallest register class we allow when constraining virtual
35/// registers. If satisfying all register class constraints would require
36/// using a smaller register class, emit a COPY to a new virtual register
37/// instead.
38const unsigned MinRCSize = 4;
39
40/// CountResults - The results of target nodes have register or immediate
41/// operands first, then an optional chain, and optional glue operands (which do
42/// not go into the resulting MachineInstr).
43unsigned InstrEmitter::CountResults(SDNode *Node) {
44 unsigned N = Node->getNumValues();
45 while (N && Node->getValueType(N - 1) == MVT::Glue)
46 --N;
47 if (N && Node->getValueType(N - 1) == MVT::Other)
48 --N; // Skip over chain result.
49 return N;
50}
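// Editorial note (illustrative example, not from the original source): for a
// selected node whose value list is (i32, f64, ch, glue), the trailing glue
// and chain results are skipped and CountResults returns 2.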
51
52/// countOperands - The inputs to target nodes have any actual inputs first,
53/// followed by an optional chain operand, then an optional glue operand.
54/// Compute the number of actual operands that will go into the resulting
55/// MachineInstr.
56///
57/// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding
58/// the chain and glue. These operands may be implicit on the machine instr.
59static unsigned countOperands(SDNode *Node, unsigned NumExpUses,
60 unsigned &NumImpUses) {
61 unsigned N = Node->getNumOperands();
62 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
63 --N;
64 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
65 --N; // Ignore chain if it exists.
66
67 // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses.
68 NumImpUses = N - NumExpUses;
69 for (unsigned I = N; I > NumExpUses; --I) {
70 if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
71 continue;
72 if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
73 if (RN->getReg().isPhysical())
74 continue;
75 NumImpUses = N - I;
76 break;
77 }
78
79 return N;
80}
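// Editorial note (illustrative example, not from the original source): for a
// call-like node with operands (callee, physreg RegisterSDNode, RegisterMask,
// chain, glue) and NumExpUses == 1, the chain and glue are stripped, the
// function returns 3, and NumImpUses is set to 2 for the trailing physreg and
// register-mask operands.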
81
82/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
83/// implicit physical register output.
84void InstrEmitter::EmitCopyFromReg(SDValue Op, bool IsClone, Register SrcReg,
85 VRBaseMapType &VRBaseMap) {
86 Register VRBase;
87 if (SrcReg.isVirtual()) {
88 // Just use the input register directly!
89 if (IsClone)
90 VRBaseMap.erase(Op);
91 bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second;
92 (void)isNew; // Silence compiler warning.
93 assert(isNew && "Node emitted out of order - early");
94 return;
95 }
96
97 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
98 // the CopyToReg'd destination register instead of creating a new vreg.
99 bool MatchReg = true;
100 const TargetRegisterClass *UseRC = nullptr;
101 MVT VT = Op.getSimpleValueType();
102
103 // Stick to the preferred register classes for legal types.
104 if (TLI->isTypeLegal(VT))
105 UseRC = TLI->getRegClassFor(VT, Op->isDivergent());
106
107 for (SDNode *User : Op->users()) {
108 bool Match = true;
109 if (User->getOpcode() == ISD::CopyToReg && User->getOperand(2) == Op) {
110 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
111 if (DestReg.isVirtual()) {
112 VRBase = DestReg;
113 Match = false;
114 } else if (DestReg != SrcReg)
115 Match = false;
116 } else {
117 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
118 if (User->getOperand(i) != Op)
119 continue;
120 if (VT == MVT::Other || VT == MVT::Glue)
121 continue;
122 Match = false;
123 if (User->isMachineOpcode()) {
124 const MCInstrDesc &II = TII->get(User->getMachineOpcode());
125 const TargetRegisterClass *RC = nullptr;
126 if (i + II.getNumDefs() < II.getNumOperands()) {
127 RC = TRI->getAllocatableClass(
128 TII->getRegClass(II, i + II.getNumDefs()));
129 }
130 if (!UseRC)
131 UseRC = RC;
132 else if (RC) {
133 const TargetRegisterClass *ComRC =
134 TRI->getCommonSubClass(UseRC, RC);
135 // If multiple uses expect disjoint register classes, we emit
136 // copies in AddRegisterOperand.
137 if (ComRC)
138 UseRC = ComRC;
139 }
140 }
141 }
142 }
143 MatchReg &= Match;
144 if (VRBase)
145 break;
146 }
147
148 const TargetRegisterClass *SrcRC = nullptr, *DstRC = nullptr;
149 SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
150
151 // Figure out the register class to create for the destreg.
152 if (VRBase) {
153 DstRC = MRI->getRegClass(VRBase);
154 } else if (UseRC) {
155 assert(TRI->isTypeLegalForClass(*UseRC, VT) &&
156 "Incompatible phys register def and uses!");
157 DstRC = UseRC;
158 } else
159 DstRC = SrcRC;
160
161 // If all uses are reading from the src physical register and copying the
162 // register is either impossible or very expensive, then don't create a copy.
163 if (MatchReg && SrcRC->expensiveOrImpossibleToCopy()) {
164 VRBase = SrcReg;
165 } else {
166 // Create the reg, emit the copy.
167 VRBase = MRI->createVirtualRegister(DstRC);
168 BuildMI(*MBB, InsertPos, Op.getDebugLoc(), TII->get(TargetOpcode::COPY),
169 VRBase)
170 .addReg(SrcReg);
171 }
172
173 if (IsClone)
174 VRBaseMap.erase(Op);
175 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
176 (void)isNew; // Silence compiler warning.
177 assert(isNew && "Node emitted out of order - early");
178}
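// Editorial note (illustrative example, not from the original source): when
// every user merely reads SrcReg and SrcReg's class is expensive or impossible
// to copy, the physical register itself is recorded in VRBaseMap; otherwise a
// fresh virtual register of DstRC is created and a COPY from SrcReg is
// emitted.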
179
180void InstrEmitter::CreateVirtualRegisters(SDNode *Node,
181 MachineInstrBuilder &MIB,
182 const MCInstrDesc &II,
183 bool IsClone, bool IsCloned,
184 VRBaseMapType &VRBaseMap) {
185 assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
186 "IMPLICIT_DEF should have been handled as a special case elsewhere!");
187
188 unsigned NumResults = CountResults(Node);
189 bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
190 II.isVariadic() && II.variadicOpsAreDefs();
191 unsigned NumVRegs = HasVRegVariadicDefs ? NumResults : II.getNumDefs();
192 if (Node->getMachineOpcode() == TargetOpcode::STATEPOINT)
193 NumVRegs = NumResults;
194 for (unsigned i = 0; i < NumVRegs; ++i) {
195 // If the specific node value is only used by a CopyToReg and the dest reg
196 // is a vreg in the same register class, use the CopyToReg'd destination
197 // register instead of creating a new vreg.
198 Register VRBase;
199 const TargetRegisterClass *RC =
200 TRI->getAllocatableClass(TII->getRegClass(II, i));
201 // Always let the value type influence the used register class. The
202 // constraints on the instruction may be too lax to represent the value
203 // type correctly. For example, a 64-bit float (X86::FR64) can't live in
204 // the 32-bit float super-class (X86::FR32).
205 if (i < NumResults && TLI->isTypeLegal(Node->getSimpleValueType(i))) {
206 const TargetRegisterClass *VTRC = TLI->getRegClassFor(
207 Node->getSimpleValueType(i),
208 (Node->isDivergent() || (RC && TRI->isDivergentRegClass(RC))));
209 if (RC)
210 VTRC = TRI->getCommonSubClass(RC, VTRC);
211 if (VTRC)
212 RC = VTRC;
213 }
214
215 if (!II.operands().empty() && II.operands()[i].isOptionalDef()) {
216 // Optional def must be a physical register.
217 VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg();
218 assert(VRBase.isPhysical());
219 MIB.addReg(VRBase, RegState::Define);
220 }
221
222 if (!VRBase && !IsClone && !IsCloned)
223 for (SDNode *User : Node->users()) {
224 if (User->getOpcode() == ISD::CopyToReg &&
225 User->getOperand(2).getNode() == Node &&
226 User->getOperand(2).getResNo() == i) {
227 Register Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
228 if (Reg.isVirtual()) {
229 const TargetRegisterClass *RegRC = MRI->getRegClass(Reg);
230 if (RegRC == RC) {
231 VRBase = Reg;
232 MIB.addReg(VRBase, RegState::Define);
233 break;
234 }
235 }
236 }
237 }
238
239 // Create the result registers for this node and add the result regs to
240 // the machine instruction.
241 if (!VRBase) {
242 assert(RC && "Isn't a register operand!");
243 VRBase = MRI->createVirtualRegister(RC);
244 MIB.addReg(VRBase, RegState::Define);
245 }
246
247 // If this def corresponds to a result of the SDNode insert the VRBase into
248 // the lookup map.
249 if (i < NumResults) {
250 SDValue Op(Node, i);
251 if (IsClone)
252 VRBaseMap.erase(Op);
253 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
254 (void)isNew; // Silence compiler warning.
255 assert(isNew && "Node emitted out of order - early");
256 }
257 }
258}
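// Editorial note (illustrative example, not from the original source): if
// result i of the node is consumed by a CopyToReg whose destination vreg
// already has the register class computed above, that vreg becomes the def;
// any result without such a user gets a freshly created virtual register.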
259
260/// getVR - Return the virtual register corresponding to the specified result
261/// of the specified node.
262Register InstrEmitter::getVR(SDValue Op, VRBaseMapType &VRBaseMap) {
263 if (Op.isMachineOpcode() &&
264 Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
265 // Add an IMPLICIT_DEF instruction before every use.
266 // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
267 // does not include operand register class info.
268 const TargetRegisterClass *RC = TLI->getRegClassFor(
269 Op.getSimpleValueType(), Op.getNode()->isDivergent());
270 Register VReg = MRI->createVirtualRegister(RC);
271 BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
272 TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
273 return VReg;
274 }
275
276 VRBaseMapType::iterator I = VRBaseMap.find(Op);
277 assert(I != VRBaseMap.end() && "Node emitted out of order - late");
278 return I->second;
279}
280
281static bool isConvergenceCtrlMachineOp(SDValue Op) {
282 if (Op->isMachineOpcode()) {
283 switch (Op->getMachineOpcode()) {
284 case TargetOpcode::CONVERGENCECTRL_ANCHOR:
285 case TargetOpcode::CONVERGENCECTRL_ENTRY:
286 case TargetOpcode::CONVERGENCECTRL_LOOP:
287 case TargetOpcode::CONVERGENCECTRL_GLUE:
288 return true;
289 }
290 return false;
291 }
292
293 // We can reach here when CopyFromReg is encountered. But rather than making a
294 // special case for that, we just make sure we don't reach here in some
295 // surprising way.
296 switch (Op->getOpcode()) {
297 case ISD::CONVERGENCECTRL_ANCHOR:
298 case ISD::CONVERGENCECTRL_ENTRY:
299 case ISD::CONVERGENCECTRL_LOOP:
300 case ISD::CONVERGENCECTRL_GLUE:
301 llvm_unreachable("Convergence control should have been selected by now.");
302 }
303 return false;
304}
305
306/// AddRegisterOperand - Add the specified register as an operand to the
307/// specified machine instr. Insert register copies if the register is
308/// not in the required register class.
309void
310InstrEmitter::AddRegisterOperand(MachineInstrBuilder &MIB,
311 SDValue Op,
312 unsigned IIOpNum,
313 const MCInstrDesc *II,
314 VRBaseMapType &VRBaseMap,
315 bool IsDebug, bool IsClone, bool IsCloned) {
316 assert(Op.getValueType() != MVT::Other &&
317 Op.getValueType() != MVT::Glue &&
318 "Chain and glue operands should occur at end of operand list!");
319 // Get/emit the operand.
320 Register VReg = getVR(Op, VRBaseMap);
321
322 const MCInstrDesc &MCID = MIB->getDesc();
323 bool isOptDef = IIOpNum < MCID.getNumOperands() &&
324 MCID.operands()[IIOpNum].isOptionalDef();
325
326 // If the instruction requires a register in a different class, create
327 // a new virtual register and copy the value into it, but first attempt to
328 // shrink VReg's register class within reason. For example, if VReg == GR32
329 // and II requires a GR32_NOSP, just constrain VReg to GR32_NOSP.
330 if (II) {
331 const TargetRegisterClass *OpRC = nullptr;
332 if (IIOpNum < II->getNumOperands())
333 OpRC = TII->getRegClass(*II, IIOpNum);
334
335 if (OpRC) {
336 unsigned MinNumRegs = MinRCSize;
337 // Don't apply any RC size limit for IMPLICIT_DEF. Each use has a unique
338 // virtual register.
339 if (Op.isMachineOpcode() &&
340 Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF)
341 MinNumRegs = 0;
342
343 const TargetRegisterClass *ConstrainedRC
344 = MRI->constrainRegClass(VReg, OpRC, MinNumRegs);
345 if (!ConstrainedRC) {
346 OpRC = TRI->getAllocatableClass(OpRC);
347 assert(OpRC && "Constraints cannot be fulfilled for allocation");
348 Register NewVReg = MRI->createVirtualRegister(OpRC);
349 BuildMI(*MBB, InsertPos, MIB->getDebugLoc(),
350 TII->get(TargetOpcode::COPY), NewVReg)
351 .addReg(VReg);
352 VReg = NewVReg;
353 } else {
354 assert(ConstrainedRC->isAllocatable() &&
355 "Constraining an allocatable VReg produced an unallocatable class?");
356 }
357 }
358 }
359
360 // If this value has only one use, that use is a kill. This is a
361 // conservative approximation. InstrEmitter does trivial coalescing
362 // with CopyFromReg nodes, so don't emit kill flags for them.
363 // Avoid kill flags on Schedule cloned nodes, since there will be
364 // multiple uses.
365 // Tied operands are never killed, so we need to check that. And that
366 // means we need to determine the index of the operand.
367 // Don't kill convergence control tokens. Initially they are only used in glue
368 // nodes, and the InstrEmitter later adds implicit uses on the users of the
369 // glue node. This can sometimes make it seem like there is only one use,
370 // which is the glue node itself.
371 bool isKill = Op.hasOneUse() && !isConvergenceCtrlMachineOp(Op) &&
372 Op.getNode()->getOpcode() != ISD::CopyFromReg && !IsDebug &&
373 !(IsClone || IsCloned);
374 if (isKill) {
375 unsigned Idx = MIB->getNumOperands();
376 while (Idx > 0 &&
377 MIB->getOperand(Idx-1).isReg() &&
378 MIB->getOperand(Idx-1).isImplicit())
379 --Idx;
380 bool isTied = MCID.getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
381 if (isTied)
382 isKill = false;
383 }
384
385 MIB.addReg(VReg, getDefRegState(isOptDef) | getKillRegState(isKill) |
386 getDebugRegState(IsDebug));
387}
388
389/// AddOperand - Add the specified operand to the specified machine instr. II
390/// specifies the instruction information for the node, and IIOpNum is the
391/// operand number (in the II) that we are adding.
392void InstrEmitter::AddOperand(MachineInstrBuilder &MIB, SDValue Op,
393 unsigned IIOpNum, const MCInstrDesc *II,
394 VRBaseMapType &VRBaseMap, bool IsDebug,
395 bool IsClone, bool IsCloned) {
396 if (Op.isMachineOpcode()) {
397 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
398 IsDebug, IsClone, IsCloned);
399 } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
400 if (C->getAPIntValue().getSignificantBits() <= 64) {
401 MIB.addImm(C->getSExtValue());
402 } else {
403 MIB.addCImm(
404 ConstantInt::get(MF->getFunction().getContext(), C->getAPIntValue()));
405 }
406 } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
407 MIB.addFPImm(F->getConstantFPValue());
408 } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
409 Register VReg = R->getReg();
410 MVT OpVT = Op.getSimpleValueType();
411 const TargetRegisterClass *IIRC =
412 II ? TRI->getAllocatableClass(TII->getRegClass(*II, IIOpNum)) : nullptr;
413 const TargetRegisterClass *OpRC =
414 TLI->isTypeLegal(OpVT)
415 ? TLI->getRegClassFor(OpVT,
416 Op.getNode()->isDivergent() ||
417 (IIRC && TRI->isDivergentRegClass(IIRC)))
418 : nullptr;
419
420 if (OpRC && IIRC && OpRC != IIRC && VReg.isVirtual()) {
421 Register NewVReg = MRI->createVirtualRegister(IIRC);
422 BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
423 TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
424 VReg = NewVReg;
425 }
426 // Turn additional physreg operands into implicit uses on non-variadic
427 // instructions. This is used by call and return instructions passing
428 // arguments in registers.
429 bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
430 MIB.addReg(VReg, getImplRegState(Imp));
431 } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
432 MIB.addRegMask(RM->getRegMask());
433 } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
434 MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
435 TGA->getTargetFlags());
436 } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
437 MIB.addMBB(BBNode->getBasicBlock());
438 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
439 MIB.addFrameIndex(FI->getIndex());
440 } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
441 MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
442 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
443 int Offset = CP->getOffset();
444 Align Alignment = CP->getAlign();
445
446 unsigned Idx;
447 MachineConstantPool *MCP = MF->getConstantPool();
448 if (CP->isMachineConstantPoolEntry())
449 Idx = MCP->getConstantPoolIndex(CP->getMachineCPVal(), Alignment);
450 else
451 Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Alignment);
452 MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
453 } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
454 MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
455 } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
456 MIB.addSym(SymNode->getMCSymbol());
457 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
458 MIB.addBlockAddress(BA->getBlockAddress(),
459 BA->getOffset(),
460 BA->getTargetFlags());
461 } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
462 MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
463 } else {
464 assert(Op.getValueType() != MVT::Other &&
465 Op.getValueType() != MVT::Glue &&
466 "Chain and glue operands should occur at end of operand list!");
467 AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
468 IsDebug, IsClone, IsCloned);
469 }
470}
471
472Register InstrEmitter::ConstrainForSubReg(Register VReg, unsigned SubIdx,
473 MVT VT, bool isDivergent, const DebugLoc &DL) {
474 const TargetRegisterClass *VRC = MRI->getRegClass(VReg);
475 const TargetRegisterClass *RC = TRI->getSubClassWithSubReg(VRC, SubIdx);
476
477 // RC is a sub-class of VRC that supports SubIdx. Try to constrain VReg
478 // within reason.
479 if (RC && RC != VRC)
480 RC = MRI->constrainRegClass(VReg, RC, MinRCSize);
481
482 // VReg has been adjusted. It can be used with SubIdx operands now.
483 if (RC)
484 return VReg;
485
486 // VReg couldn't be reasonably constrained. Emit a COPY to a new virtual
487 // register instead.
488 RC = TRI->getSubClassWithSubReg(TLI->getRegClassFor(VT, isDivergent), SubIdx);
489 assert(RC && "No legal register class for VT supports that SubIdx");
490 Register NewReg = MRI->createVirtualRegister(RC);
491 BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY), NewReg)
492 .addReg(VReg);
493 return NewReg;
494}
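// Editorial note (illustrative example, not from the original source): if
// VReg's class has no sub-class that supports SubIdx, or constraining it would
// drop below MinRCSize registers, the function emits a COPY into a new vreg
// whose class is taken from the legal class for VT restricted to SubIdx.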
495
496/// EmitSubregNode - Generate machine code for subreg nodes.
497///
498void InstrEmitter::EmitSubregNode(SDNode *Node, VRBaseMapType &VRBaseMap,
499 bool IsClone, bool IsCloned) {
500 Register VRBase;
501 unsigned Opc = Node->getMachineOpcode();
502
503 // If the node is only used by a CopyToReg and the dest reg is a vreg, use
504 // the CopyToReg'd destination register instead of creating a new vreg.
505 for (SDNode *User : Node->users()) {
506 if (User->getOpcode() == ISD::CopyToReg &&
507 User->getOperand(2).getNode() == Node) {
508 Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
509 if (DestReg.isVirtual()) {
510 VRBase = DestReg;
511 break;
512 }
513 }
514 }
515
516 if (Opc == TargetOpcode::EXTRACT_SUBREG) {
517 // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub. There are no
518// constraints on the %dst register; COPY can target all legal register
519 // classes.
520 unsigned SubIdx = Node->getConstantOperandVal(1);
521 const TargetRegisterClass *TRC =
522 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
523
524 Register Reg;
525 MachineInstr *DefMI;
526 RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
527 if (R && R->getReg().isPhysical()) {
528 Reg = R->getReg();
529 DefMI = nullptr;
530 } else {
531 Reg = R ? R->getReg() : getVR(Node->getOperand(0), VRBaseMap);
532 DefMI = MRI->getVRegDef(Reg);
533 }
534
535 Register SrcReg, DstReg;
536 unsigned DefSubIdx;
537 if (DefMI &&
538 TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
539 SubIdx == DefSubIdx &&
540 TRC == MRI->getRegClass(SrcReg)) {
541 // Optimize these:
542 // r1025 = s/zext r1024, 4
543 // r1026 = extract_subreg r1025, 4
544 // to a copy
545 // r1026 = copy r1024
546 VRBase = MRI->createVirtualRegister(TRC);
547 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
548 TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
549 MRI->clearKillFlags(SrcReg);
550 } else {
551 // Reg may not support a SubIdx sub-register, and we may need to
552 // constrain its register class or issue a COPY to a compatible register
553 // class.
554 if (Reg.isVirtual())
555 Reg = ConstrainForSubReg(Reg, SubIdx,
556 Node->getOperand(0).getSimpleValueType(),
557 Node->isDivergent(), Node->getDebugLoc());
558 // Create the destreg if it is missing.
559 if (!VRBase)
560 VRBase = MRI->createVirtualRegister(TRC);
561
562 // Create the extract_subreg machine instruction.
563 MachineInstrBuilder CopyMI =
564 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
565 TII->get(TargetOpcode::COPY), VRBase);
566 if (Reg.isVirtual())
567 CopyMI.addReg(Reg, 0, SubIdx);
568 else
569 CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
570 }
571 } else if (Opc == TargetOpcode::INSERT_SUBREG ||
572 Opc == TargetOpcode::SUBREG_TO_REG) {
573 SDValue N0 = Node->getOperand(0);
574 SDValue N1 = Node->getOperand(1);
575 SDValue N2 = Node->getOperand(2);
576 unsigned SubIdx = N2->getAsZExtVal();
577
578 // Figure out the register class to create for the destreg. It should be
579 // the largest legal register class supporting SubIdx sub-registers.
580 // RegisterCoalescer will constrain it further if it decides to eliminate
581 // the INSERT_SUBREG instruction.
582 //
583 // %dst = INSERT_SUBREG %src, %sub, SubIdx
584 //
585 // is lowered by TwoAddressInstructionPass to:
586 //
587 // %dst = COPY %src
588 // %dst:SubIdx = COPY %sub
589 //
590 // There is no constraint on the %src register class.
591 //
592 const TargetRegisterClass *SRC =
593 TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
594 SRC = TRI->getSubClassWithSubReg(SRC, SubIdx);
595 assert(SRC && "No register class supports VT and SubIdx for INSERT_SUBREG");
596
597 if (VRBase == 0 || !SRC->hasSubClassEq(MRI->getRegClass(VRBase)))
598 VRBase = MRI->createVirtualRegister(SRC);
599
600 // Create the insert_subreg or subreg_to_reg machine instruction.
601 MachineInstrBuilder MIB =
602 BuildMI(*MF, Node->getDebugLoc(), TII->get(Opc), VRBase);
603
604 // If creating a subreg_to_reg, then the first input operand
605 // is an implicit value immediate, otherwise it's a register
606 if (Opc == TargetOpcode::SUBREG_TO_REG) {
607 const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
608 MIB.addImm(SD->getZExtValue());
609 } else
610 AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
611 IsClone, IsCloned);
612 // Add the subregister being inserted
613 AddOperand(MIB, N1, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
614 IsClone, IsCloned);
615 MIB.addImm(SubIdx);
616 MBB->insert(InsertPos, MIB);
617 } else
618 llvm_unreachable("Node is not insert_subreg, extract_subreg, or subreg_to_reg");
619
620 SDValue Op(Node, 0);
621 bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
622 (void)isNew; // Silence compiler warning.
623 assert(isNew && "Node emitted out of order - early");
624}
625
626/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
627/// COPY_TO_REGCLASS is just a normal copy, except that the destination
628/// register is constrained to be in a particular register class.
629///
630void
631InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
632 VRBaseMapType &VRBaseMap) {
633 // Create the new VReg in the destination class and emit a copy.
634 unsigned DstRCIdx = Node->getConstantOperandVal(1);
635 const TargetRegisterClass *DstRC =
636 TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
637 Register NewVReg = MRI->createVirtualRegister(DstRC);
638 const MCInstrDesc &II = TII->get(TargetOpcode::COPY);
639 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
640 AddOperand(MIB, Node->getOperand(0), 1, &II, VRBaseMap, /*IsDebug=*/false,
641 /*IsClone=*/false, /*IsCloned*/ false);
642
643 MBB->insert(InsertPos, MIB);
644 SDValue Op(Node, 0);
645 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
646 (void)isNew; // Silence compiler warning.
647 assert(isNew && "Node emitted out of order - early");
648}
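// Editorial note (illustrative example, not from the original source): a
// (COPY_TO_REGCLASS %x, RCId) node is emitted as a plain COPY into a fresh
// vreg of the requested class; the class id operand only selects the class of
// that new vreg and is not added to the MachineInstr.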
649
650/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
651///
652void InstrEmitter::EmitRegSequence(SDNode *Node, VRBaseMapType &VRBaseMap,
653 bool IsClone, bool IsCloned) {
654 unsigned DstRCIdx = Node->getConstantOperandVal(0);
655 const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
656 Register NewVReg = MRI->createVirtualRegister(TRI->getAllocatableClass(RC));
657 const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
658 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
659 unsigned NumOps = Node->getNumOperands();
660 // If the input pattern has a chain, then the root of the corresponding
661 // output pattern will get a chain as well. This can happen to be a
662 // REG_SEQUENCE (which is not "guarded" by countOperands/CountResults).
663 if (NumOps && Node->getOperand(NumOps-1).getValueType() == MVT::Other)
664 --NumOps; // Ignore chain if it exists.
665
666 assert((NumOps & 1) == 1 &&
667 "REG_SEQUENCE must have an odd number of operands!");
668 for (unsigned i = 1; i != NumOps; ++i) {
669 SDValue Op = Node->getOperand(i);
670 if ((i & 1) == 0) {
671 RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
672 // Skip physical registers as they don't have a vreg to get and we'll
673 // insert copies for them in TwoAddressInstructionPass anyway.
674 if (!R || !R->getReg().isPhysical()) {
675 unsigned SubIdx = Op->getAsZExtVal();
676 Register SubReg = getVR(Node->getOperand(i - 1), VRBaseMap);
677 const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
678 const TargetRegisterClass *SRC =
679 TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
680 if (SRC && SRC != RC) {
681 MRI->setRegClass(NewVReg, SRC);
682 RC = SRC;
683 }
684 }
685 }
686 AddOperand(MIB, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
687 IsClone, IsCloned);
688 }
689
690 MBB->insert(InsertPos, MIB);
691 SDValue Op(Node, 0);
692 bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
693 (void)isNew; // Silence compiler warning.
694 assert(isNew && "Node emitted out of order - early");
695}
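// Editorial note (illustrative example, not from the original source): a
// REG_SEQUENCE node with operands (RCId, %a, subidx0, %b, subidx1) is emitted
// as "%new = REG_SEQUENCE %a, subidx0, %b, subidx1", where %new may be moved
// to a matching super-register class found by getMatchingSuperRegClass.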
696
697/// EmitDbgValue - Generate machine instruction for a dbg_value node.
698///
699MachineInstr *
700InstrEmitter::EmitDbgValue(SDDbgValue *SD,
701 VRBaseMapType &VRBaseMap) {
702 DebugLoc DL = SD->getDebugLoc();
703 assert(cast<DILocalVariable>(SD->getVariable())
704 ->isValidLocationForIntrinsic(DL) &&
705 "Expected inlined-at fields to agree");
706
707 SD->setIsEmitted();
708
709 assert(!SD->getLocationOps().empty() &&
710 "dbg_value with no location operands?");
711
712 if (SD->isInvalidated())
713 return EmitDbgNoLocation(SD);
714
715 // Attempt to produce a DBG_INSTR_REF if we've been asked to.
716 if (EmitDebugInstrRefs)
717 if (auto *InstrRef = EmitDbgInstrRef(SD, VRBaseMap))
718 return InstrRef;
719
720 // Emit variadic dbg_value nodes as DBG_VALUE_LIST if they have not been
721 // emitted as instruction references.
722 if (SD->isVariadic())
723 return EmitDbgValueList(SD, VRBaseMap);
724
725 // Emit single-location dbg_value nodes as DBG_VALUE if they have not been
726 // emitted as instruction references.
727 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
728}
729
730static MachineOperand GetMOForConstDbgOp(const SDDbgOperand &Op) {
731 const Value *V = Op.getConst();
732 if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
733 if (CI->getBitWidth() > 64)
734 return MachineOperand::CreateCImm(CI);
735 if (CI->getBitWidth() == 1)
736 return MachineOperand::CreateImm(CI->getZExtValue());
737 return MachineOperand::CreateImm(CI->getSExtValue());
738 }
739 if (const ConstantFP *CF = dyn_cast<ConstantFP>(V))
740 return MachineOperand::CreateFPImm(CF);
741 // Note: This assumes that all nullptr constants are zero-valued.
742 if (isa<ConstantPointerNull>(V))
743 return MachineOperand::CreateImm(0);
744 // Undef or unhandled value type, so return an undef operand.
745 return MachineOperand::CreateReg(
746 /* Reg */ 0U, /* isDef */ false, /* isImp */ false,
747 /* isKill */ false, /* isDead */ false,
748 /* isUndef */ false, /* isEarlyClobber */ false,
749 /* SubReg */ 0, /* isDebug */ true);
750}
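// Editorial note (illustrative example, not from the original source): an i32
// constant 42 becomes an immediate operand, a 128-bit integer becomes a CImm
// operand, a floating-point constant becomes an FPImm operand, a null pointer
// becomes the immediate 0, and anything else becomes an undef register
// operand.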
751
752void InstrEmitter::AddDbgValueLocationOps(
753 MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc,
754 ArrayRef<SDDbgOperand> LocationOps,
755 VRBaseMapType &VRBaseMap) {
756 for (const SDDbgOperand &Op : LocationOps) {
757 switch (Op.getKind()) {
758 case SDDbgOperand::FRAMEIX:
759 MIB.addFrameIndex(Op.getFrameIx());
760 break;
761 case SDDbgOperand::VREG:
762 MIB.addReg(Op.getVReg());
763 break;
764 case SDDbgOperand::SDNODE: {
765 SDValue V = SDValue(Op.getSDNode(), Op.getResNo());
766 // It's possible we replaced this SDNode with other(s) and therefore
767 // didn't generate code for it. It's better to catch these cases where
768 // they happen and transfer the debug info, but trying to guarantee that
769 // in all cases would be very fragile; this is a safeguard for any
770 // that were missed.
771 if (VRBaseMap.count(V) == 0)
772 MIB.addReg(0U); // undef
773 else
774 AddOperand(MIB, V, (*MIB).getNumOperands(), &DbgValDesc, VRBaseMap,
775 /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
776 } break;
777 case SDDbgOperand::CONST:
778 MIB.add(GetMOForConstDbgOp(Op));
779 break;
780 }
781 }
782}
783
784MachineInstr *
785InstrEmitter::EmitDbgInstrRef(SDDbgValue *SD,
786 VRBaseMapType &VRBaseMap) {
787 MDNode *Var = SD->getVariable();
788 const DIExpression *Expr = SD->getExpression();
789 DebugLoc DL = SD->getDebugLoc();
790 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_INSTR_REF);
791
792 // Returns true if the given operand is not a legal debug operand for a
793 // DBG_INSTR_REF.
794 auto IsInvalidOp = [](SDDbgOperand DbgOp) {
795 return DbgOp.getKind() == SDDbgOperand::FRAMEIX;
796 };
797 // Returns true if the given operand is not itself an instruction reference
798 // but is a legal debug operand for a DBG_INSTR_REF.
799 auto IsNonInstrRefOp = [](SDDbgOperand DbgOp) {
800 return DbgOp.getKind() == SDDbgOperand::CONST;
801 };
802
803 // If this variable location does not depend on any instructions or contains
804 // any stack locations, produce it as a standard debug value instead.
805 if (any_of(SD->getLocationOps(), IsInvalidOp) ||
806 all_of(SD->getLocationOps(), IsNonInstrRefOp)) {
807 if (SD->isVariadic())
808 return EmitDbgValueList(SD, VRBaseMap);
809 return EmitDbgValueFromSingleOp(SD, VRBaseMap);
810 }
811
812 // Immediately fold any indirectness from the LLVM-IR intrinsic into the
813 // expression:
814 if (SD->isIndirect())
815 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
816 // If this is not already a variadic expression, it must be modified to become
817 // one.
818 if (!SD->isVariadic())
819 Expr = DIExpression::convertToVariadicExpression(Expr);
820
821 SmallVector<MachineOperand> MOs;
822
823 // It may not be immediately possible to identify the MachineInstr that
824 // defines a VReg, it can depend for example on the order blocks are
825 // emitted in. When this happens, or when further analysis is needed later,
826 // produce an instruction like this:
827 //
828 // DBG_INSTR_REF !123, !456, %0:gr64
829 //
830 // i.e., point the instruction at the vreg, and patch it up later in
831 // MachineFunction::finalizeDebugInstrRefs.
832 auto AddVRegOp = [&](Register VReg) {
833 MOs.push_back(MachineOperand::CreateReg(
834 /* Reg */ VReg, /* isDef */ false, /* isImp */ false,
835 /* isKill */ false, /* isDead */ false,
836 /* isUndef */ false, /* isEarlyClobber */ false,
837 /* SubReg */ 0, /* isDebug */ true));
838 };
839 unsigned OpCount = SD->getLocationOps().size();
840 for (unsigned OpIdx = 0; OpIdx < OpCount; ++OpIdx) {
841 SDDbgOperand DbgOperand = SD->getLocationOps()[OpIdx];
842
843 // Try to find both the defined register and the instruction defining it.
844 MachineInstr *DefMI = nullptr;
845 Register VReg;
846
847 if (DbgOperand.getKind() == SDDbgOperand::VREG) {
848 VReg = DbgOperand.getVReg();
849
850 // No definition means that block hasn't been emitted yet. Leave a vreg
851 // reference to be fixed later.
852 if (!MRI->hasOneDef(VReg)) {
853 AddVRegOp(VReg);
854 continue;
855 }
856
857 DefMI = &*MRI->def_instr_begin(VReg);
858 } else if (DbgOperand.getKind() == SDDbgOperand::SDNODE) {
859 // Look up the corresponding VReg for the given SDNode, if any.
860 SDNode *Node = DbgOperand.getSDNode();
861 SDValue Op = SDValue(Node, DbgOperand.getResNo());
862 VRBaseMapType::iterator I = VRBaseMap.find(Op);
863 // No VReg -> produce a DBG_VALUE $noreg instead.
864 if (I == VRBaseMap.end())
865 break;
866
867 // Try to pick out a defining instruction at this point.
868 VReg = getVR(Op, VRBaseMap);
869
870 // Again, if there's no instruction defining the VReg right now, fix it up
871 // later.
872 if (!MRI->hasOneDef(VReg)) {
873 AddVRegOp(VReg);
874 continue;
875 }
876
877 DefMI = &*MRI->def_instr_begin(VReg);
878 } else {
879 assert(DbgOperand.getKind() == SDDbgOperand::CONST);
880 MOs.push_back(GetMOForConstDbgOp(DbgOperand));
881 continue;
882 }
883
884 // Avoid copy like instructions: they don't define values, only move them.
885 // Leave a virtual-register reference until it can be fixed up later, to
886 // find the underlying value definition.
887 if (DefMI->isCopyLike() || TII->isCopyInstr(*DefMI)) {
888 AddVRegOp(VReg);
889 continue;
890 }
891
892 // Find the operand number which defines the specified VReg.
893 unsigned OperandIdx = 0;
894 for (const auto &MO : DefMI->operands()) {
895 if (MO.isReg() && MO.isDef() && MO.getReg() == VReg)
896 break;
897 ++OperandIdx;
898 }
899 assert(OperandIdx < DefMI->getNumOperands());
900
901 // Make the DBG_INSTR_REF refer to that instruction, and that operand.
902 unsigned InstrNum = DefMI->getDebugInstrNum();
903 MOs.push_back(MachineOperand::CreateDbgInstrRef(InstrNum, OperandIdx));
904 }
905
906 // If we haven't created a valid MachineOperand for every DbgOp, abort and
907 // produce an undef DBG_VALUE.
908 if (MOs.size() != OpCount)
909 return EmitDbgNoLocation(SD);
910
911 return BuildMI(*MF, DL, RefII, false, MOs, Var, Expr);
912}
913
914MachineInstr *InstrEmitter::EmitDbgNoLocation(SDDbgValue *SD) {
915 // An invalidated SDNode must generate an undef DBG_VALUE: although the
916 // original value is no longer computed, earlier DBG_VALUEs live ranges
917 // must not leak into later code.
918 DIVariable *Var = SD->getVariable();
919 const DIExpression *Expr =
920 DIExpression::convertToUndefExpression(SD->getExpression());
921 DebugLoc DL = SD->getDebugLoc();
922 const MCInstrDesc &Desc = TII->get(TargetOpcode::DBG_VALUE);
923 return BuildMI(*MF, DL, Desc, false, 0U, Var, Expr);
924}
925
926MachineInstr *
927InstrEmitter::EmitDbgValueList(SDDbgValue *SD,
928 VRBaseMapType &VRBaseMap) {
929 MDNode *Var = SD->getVariable();
930 DIExpression *Expr = SD->getExpression();
931 DebugLoc DL = SD->getDebugLoc();
932 // DBG_VALUE_LIST := "DBG_VALUE_LIST" var, expression, loc (, loc)*
933 const MCInstrDesc &DbgValDesc = TII->get(TargetOpcode::DBG_VALUE_LIST);
934 // Build the DBG_VALUE_LIST instruction base.
935 auto MIB = BuildMI(*MF, DL, DbgValDesc);
936 MIB.addMetadata(Var);
937 MIB.addMetadata(Expr);
938 AddDbgValueLocationOps(MIB, DbgValDesc, SD->getLocationOps(), VRBaseMap);
939 return &*MIB;
940}
941
942MachineInstr *
943InstrEmitter::EmitDbgValueFromSingleOp(SDDbgValue *SD,
944 VRBaseMapType &VRBaseMap) {
945 MDNode *Var = SD->getVariable();
946 DIExpression *Expr = SD->getExpression();
947 DebugLoc DL = SD->getDebugLoc();
948 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
949
950 assert(SD->getLocationOps().size() == 1 &&
951 "Non variadic dbg_value should have only one location op");
952
953 // See about constant-folding the expression.
954 // Copy the location operand in case we replace it.
955 SmallVector<SDDbgOperand, 1> LocationOps(1, SD->getLocationOps()[0]);
956 if (Expr && LocationOps[0].getKind() == SDDbgOperand::CONST) {
957 const Value *V = LocationOps[0].getConst();
958 if (auto *C = dyn_cast<ConstantInt>(V)) {
959 std::tie(Expr, C) = Expr->constantFold(C);
960 LocationOps[0] = SDDbgOperand::fromConst(C);
961 }
962 }
963
964 // Emit non-variadic dbg_value nodes as DBG_VALUE.
965 // DBG_VALUE := "DBG_VALUE" loc, isIndirect, var, expr
966 auto MIB = BuildMI(*MF, DL, II);
967 AddDbgValueLocationOps(MIB, II, LocationOps, VRBaseMap);
968
969 if (SD->isIndirect())
970 MIB.addImm(0U);
971 else
972 MIB.addReg(0U);
973
974 return MIB.addMetadata(Var).addMetadata(Expr);
975}
976
977MachineInstr *
978InstrEmitter::EmitDbgLabel(SDDbgLabel *SD) {
979 MDNode *Label = SD->getLabel();
980 DebugLoc DL = SD->getDebugLoc();
981 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
982 "Expected inlined-at fields to agree");
983
984 const MCInstrDesc &II = TII->get(TargetOpcode::DBG_LABEL);
985 MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
986 MIB.addMetadata(Label);
987
988 return &*MIB;
989}
990
991/// EmitMachineNode - Generate machine code for a target-specific node and
992/// needed dependencies.
993///
994void InstrEmitter::
995EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
996 VRBaseMapType &VRBaseMap) {
997 unsigned Opc = Node->getMachineOpcode();
998
999 // Handle subreg insert/extract specially
1000 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1001 Opc == TargetOpcode::INSERT_SUBREG ||
1002 Opc == TargetOpcode::SUBREG_TO_REG) {
1003 EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
1004 return;
1005 }
1006
1007 // Handle COPY_TO_REGCLASS specially.
1008 if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
1009 EmitCopyToRegClassNode(Node, VRBaseMap);
1010 return;
1011 }
1012
1013 // Handle REG_SEQUENCE specially.
1014 if (Opc == TargetOpcode::REG_SEQUENCE) {
1015 EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
1016 return;
1017 }
1018
1019 if (Opc == TargetOpcode::IMPLICIT_DEF)
1020 // We want a unique VR for each IMPLICIT_DEF use.
1021 return;
1022
1023 const MCInstrDesc &II = TII->get(Opc);
1024 unsigned NumResults = CountResults(Node);
1025 unsigned NumDefs = II.getNumDefs();
1026 const MCPhysReg *ScratchRegs = nullptr;
1027
1028 // Handle STACKMAP and PATCHPOINT specially and then use the generic code.
1029 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
1030 // Stackmaps do not have arguments and do not preserve their calling
1031 // convention. However, to simplify runtime support, they clobber the same
1032 // scratch registers as AnyRegCC.
1033 unsigned CC = CallingConv::AnyReg;
1034 if (Opc == TargetOpcode::PATCHPOINT) {
1035 CC = Node->getConstantOperandVal(PatchPointOpers::CCPos);
1036 NumDefs = NumResults;
1037 }
1038 ScratchRegs = TLI->getScratchRegisters((CallingConv::ID) CC);
1039 } else if (Opc == TargetOpcode::STATEPOINT) {
1040 NumDefs = NumResults;
1041 }
1042
1043 unsigned NumImpUses = 0;
1044 unsigned NodeOperands =
1045 countOperands(Node, II.getNumOperands() - NumDefs, NumImpUses);
1046 bool HasVRegVariadicDefs = !MF->getTarget().usesPhysRegsForValues() &&
1047 II.isVariadic() && II.variadicOpsAreDefs();
1048 bool HasPhysRegOuts = NumResults > NumDefs && !II.implicit_defs().empty() &&
1049 !HasVRegVariadicDefs;
1050#ifndef NDEBUG
1051 unsigned NumMIOperands = NodeOperands + NumResults;
1052 if (II.isVariadic())
1053 assert(NumMIOperands >= II.getNumOperands() &&
1054 "Too few operands for a variadic node!");
1055 else
1056 assert(NumMIOperands >= II.getNumOperands() &&
1057 NumMIOperands <=
1058 II.getNumOperands() + II.implicit_defs().size() + NumImpUses &&
1059 "#operands for dag node doesn't match .td file!");
1060#endif
1061
1062 // Create the new machine instruction.
1063 MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II);
1064
1065 // Transfer IR flags from the SDNode to the MachineInstr
1066 MachineInstr *MI = MIB.getInstr();
1067 const SDNodeFlags Flags = Node->getFlags();
1068 if (Flags.hasUnpredictable())
1069 MI->setFlag(MachineInstr::MIFlag::Unpredictable);
1070
1071 // Add result register values for things that are defined by this
1072 // instruction.
1073 if (NumResults) {
1074 CreateVirtualRegisters(Node, MIB, II, IsClone, IsCloned, VRBaseMap);
1075
1076 if (Flags.hasNoSignedZeros())
1077 MI->setFlag(MachineInstr::MIFlag::FmNsz);
1078
1079 if (Flags.hasAllowReciprocal())
1080 MI->setFlag(MachineInstr::MIFlag::FmArcp);
1081
1082 if (Flags.hasNoNaNs())
1083 MI->setFlag(MachineInstr::MIFlag::FmNoNans);
1084
1085 if (Flags.hasNoInfs())
1086 MI->setFlag(MachineInstr::MIFlag::FmNoInfs);
1087
1088 if (Flags.hasAllowContract())
1089 MI->setFlag(MachineInstr::MIFlag::FmContract);
1090
1091 if (Flags.hasApproximateFuncs())
1092 MI->setFlag(MachineInstr::MIFlag::FmAfn);
1093
1094 if (Flags.hasAllowReassociation())
1095 MI->setFlag(MachineInstr::MIFlag::FmReassoc);
1096
1097 if (Flags.hasNoUnsignedWrap())
1098 MI->setFlag(MachineInstr::MIFlag::NoUWrap);
1099
1100 if (Flags.hasNoSignedWrap())
1101 MI->setFlag(MachineInstr::MIFlag::NoSWrap);
1102
1103 if (Flags.hasExact())
1104 MI->setFlag(MachineInstr::MIFlag::IsExact);
1105
1106 if (Flags.hasNoFPExcept())
1107 MI->setFlag(MachineInstr::MIFlag::NoFPExcept);
1108
1109 if (Flags.hasDisjoint())
1110 MI->setFlag(MachineInstr::MIFlag::Disjoint);
1111
1112 if (Flags.hasSameSign())
1113 MI->setFlag(MachineInstr::MIFlag::SameSign);
1114 }
1115
1116 // Emit all of the actual operands of this instruction, adding them to the
1117 // instruction as appropriate.
1118 bool HasOptPRefs = NumDefs > NumResults;
1119 assert((!HasOptPRefs || !HasPhysRegOuts) &&
1120 "Unable to cope with optional defs and phys regs defs!");
1121 unsigned NumSkip = HasOptPRefs ? NumDefs - NumResults : 0;
1122 for (unsigned i = NumSkip; i != NodeOperands; ++i)
1123 AddOperand(MIB, Node->getOperand(i), i-NumSkip+NumDefs, &II,
1124 VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);
1125
1126 // Add scratch registers as implicit def and early clobber
1127 if (ScratchRegs)
1128 for (unsigned i = 0; ScratchRegs[i]; ++i)
1129 MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
1130 RegState::EarlyClobber);
1131
1132 // Set the memory reference descriptions of this instruction now that it is
1133 // part of the function.
1134 MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());
1135
1136 // Set the CFI type.
1137 MIB->setCFIType(*MF, Node->getCFIType());
1138
1139 // Insert the instruction into position in the block. This needs to
1140 // happen before any custom inserter hook is called so that the
1141 // hook knows where in the block to insert the replacement code.
1142 MBB->insert(InsertPos, MIB);
1143
1144 // The MachineInstr may also define physregs instead of virtregs. These
1145 // physreg values can reach other instructions in different ways:
1146 //
1147 // 1. When there is a use of a Node value beyond the explicitly defined
1148 // virtual registers, we emit a CopyFromReg for one of the implicitly
1149 // defined physregs. This only happens when HasPhysRegOuts is true.
1150 //
1151 // 2. A CopyFromReg reading a physreg may be glued to this instruction.
1152 //
1153 // 3. A glued instruction may implicitly use a physreg.
1154 //
1155 // 4. A glued instruction may use a RegisterSDNode operand.
1156 //
1157 // Collect all the used physreg defs, and make sure that any unused physreg
1158 // defs are marked as dead.
1159 SmallVector<Register, 8> UsedRegs;
1160
1161 // Additional results must be physical register defs.
1162 if (HasPhysRegOuts) {
1163 for (unsigned i = NumDefs; i < NumResults; ++i) {
1164 Register Reg = II.implicit_defs()[i - NumDefs];
1165 if (!Node->hasAnyUseOfValue(i))
1166 continue;
1167 // This implicitly defined physreg has a use.
1168 UsedRegs.push_back(Reg);
1169 EmitCopyFromReg(SDValue(Node, i), IsClone, Reg, VRBaseMap);
1170 }
1171 }
1172
1173 // Scan the glue chain for any used physregs.
1174 if (Node->getValueType(Node->getNumValues()-1) == MVT::Glue) {
1175 for (SDNode *F = Node->getGluedUser(); F; F = F->getGluedUser()) {
1176 if (F->getOpcode() == ISD::CopyFromReg) {
1177 Register Reg = cast<RegisterSDNode>(F->getOperand(1))->getReg();
1178 if (Reg.isPhysical())
1179 UsedRegs.push_back(Reg);
1180 continue;
1181 } else if (F->getOpcode() == ISD::CopyToReg) {
1182 // Skip CopyToReg nodes that are internal to the glue chain.
1183 continue;
1184 }
1185 // Collect declared implicit uses.
1186 const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
1187 append_range(UsedRegs, MCID.implicit_uses());
1188 // In addition to declared implicit uses, we must also check for
1189 // direct RegisterSDNode operands.
1190 for (const SDValue &Op : F->op_values())
1191 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
1192 Register Reg = R->getReg();
1193 if (Reg.isPhysical())
1194 UsedRegs.push_back(Reg);
1195 }
1196 }
1197 }
1198
1199 // Add rounding control registers as implicit def for function call.
1200 if (II.isCall() && MF->getFunction().hasFnAttribute(Attribute::StrictFP)) {
1201 ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
1202 llvm::append_range(UsedRegs, RCRegs);
1203 }
1204
1205 // Finally mark unused registers as dead.
1206 if (!UsedRegs.empty() || !II.implicit_defs().empty() || II.hasOptionalDef())
1207 MIB->setPhysRegsDeadExcept(UsedRegs, *TRI);
1208
1209 // STATEPOINT is too 'dynamic' to have a meaningful machine description.
1210 // We have to manually tie operands.
1211 if (Opc == TargetOpcode::STATEPOINT && NumDefs > 0) {
1212 assert(!HasPhysRegOuts && "STATEPOINT mishandled");
1213 MachineInstr *MI = MIB;
1214 unsigned Def = 0;
1215 int First = StatepointOpers(MI).getFirstGCPtrIdx();
1216 assert(First > 0 && "Statepoint has Defs but no GC ptr list");
1217 unsigned Use = (unsigned)First;
1218 while (Def < NumDefs) {
1219 if (MI->getOperand(Use).isReg())
1220 MI->tieOperands(Def++, Use);
1221 Use = StackMaps::getNextMetaArgIdx(MI, Use);
1222 }
1223 }
1224
1225 if (SDNode *GluedNode = Node->getGluedNode()) {
1226 // FIXME: Possibly iterate over multiple glue nodes?
1227 if (GluedNode->getOpcode() ==
1228 ~(unsigned)TargetOpcode::CONVERGENCECTRL_GLUE) {
1229 Register VReg = getVR(GluedNode->getOperand(0), VRBaseMap);
1230 MachineOperand MO = MachineOperand::CreateReg(VReg, /*isDef=*/false,
1231 /*isImp=*/true);
1232 MIB->addOperand(MO);
1233 }
1234 }
1235
1236 // Run post-isel target hook to adjust this instruction if needed.
1237 if (II.hasPostISelHook())
1238 TLI->AdjustInstrPostInstrSelection(*MIB, Node);
1239}
1240
1241/// EmitSpecialNode - Generate machine code for a target-independent node and
1242/// needed dependencies.
1243void InstrEmitter::
1244EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
1245 VRBaseMapType &VRBaseMap) {
1246 switch (Node->getOpcode()) {
1247 default:
1248#ifndef NDEBUG
1249 Node->dump();
1250#endif
1251 llvm_unreachable("This target-independent node should have been selected!");
1252 case ISD::EntryToken:
1253 case ISD::MERGE_VALUES:
1254 case ISD::TokenFactor: // fall thru
1255 break;
1256 case ISD::CopyToReg: {
1257 Register DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1258 SDValue SrcVal = Node->getOperand(2);
1259 if (DestReg.isVirtual() && SrcVal.isMachineOpcode() &&
1260 SrcVal.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
1261 // Instead of building a COPY to that vreg destination, build an
1262 // IMPLICIT_DEF instruction.
1263 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1264 TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
1265 break;
1266 }
1267 Register SrcReg;
1268 if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
1269 SrcReg = R->getReg();
1270 else
1271 SrcReg = getVR(SrcVal, VRBaseMap);
1272
1273 if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
1274 break;
1275
1276 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
1277 DestReg).addReg(SrcReg);
1278 break;
1279 }
1280 case ISD::CopyFromReg: {
1281 Register SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1282 EmitCopyFromReg(SDValue(Node, 0), IsClone, SrcReg, VRBaseMap);
1283 break;
1284 }
1285 case ISD::EH_LABEL:
1286 case ISD::ANNOTATION_LABEL: {
1287 unsigned Opc = (Node->getOpcode() == ISD::EH_LABEL)
1288 ? TargetOpcode::EH_LABEL
1289 : TargetOpcode::ANNOTATION_LABEL;
1290 MCSymbol *S = cast<LabelSDNode>(Node)->getLabel();
1291 BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
1292 TII->get(Opc)).addSym(S);
1293 break;
1294 }
1295
1296 case ISD::LIFETIME_START:
1297 case ISD::LIFETIME_END: {
1298 unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START)
1299 ? TargetOpcode::LIFETIME_START
1300 : TargetOpcode::LIFETIME_END;
1301 auto *FI = cast<FrameIndexSDNode>(Node->getOperand(1));
1302 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1303 .addFrameIndex(FI->getIndex());
1304 break;
1305 }
1306
1307 case ISD::PSEUDO_PROBE: {
1308 unsigned TarOp = TargetOpcode::PSEUDO_PROBE;
1309 auto Guid = cast<PseudoProbeSDNode>(Node)->getGuid();
1310 auto Index = cast<PseudoProbeSDNode>(Node)->getIndex();
1311 auto Attr = cast<PseudoProbeSDNode>(Node)->getAttributes();
1312
1313 BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
1314 .addImm(Guid)
1315 .addImm(Index)
1316 .addImm((uint8_t)PseudoProbeType::Block)
1317 .addImm(Attr);
1318 break;
1319 }
1320
1321 case ISD::INLINEASM:
1322 case ISD::INLINEASM_BR: {
1323 unsigned NumOps = Node->getNumOperands();
1324 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1325 --NumOps; // Ignore the glue operand.
1326
1327 // Create the inline asm machine instruction.
1328 unsigned TgtOpc = Node->getOpcode() == ISD::INLINEASM_BR
1329 ? TargetOpcode::INLINEASM_BR
1330 : TargetOpcode::INLINEASM;
1331 MachineInstrBuilder MIB =
1332 BuildMI(*MF, Node->getDebugLoc(), TII->get(TgtOpc));
1333
1334 // Add the asm string as an external symbol operand.
1335 SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
1336 const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
1337 MIB.addExternalSymbol(AsmStr);
1338
1339 // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore
1340 // bits.
1341 int64_t ExtraInfo =
1342 cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_ExtraInfo))->
1343 getZExtValue();
1344 MIB.addImm(ExtraInfo);
1345
1346 // Remember the operand index of the group flags.
1347 SmallVector<unsigned, 8> GroupIdx;
1348
1349 // Remember registers that are part of early-clobber defs.
1350 SmallVector<Register, 8> ECRegs;
1351
1352 // Add all of the operand registers to the instruction.
1353 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1354 unsigned Flags = Node->getConstantOperandVal(i);
1355 const InlineAsm::Flag F(Flags);
1356 const unsigned NumVals = F.getNumOperandRegisters();
1357
1358 GroupIdx.push_back(MIB->getNumOperands());
1359 MIB.addImm(Flags);
1360 ++i; // Skip the ID value.
1361
1362 switch (F.getKind()) {
1363 case InlineAsm::Kind::RegDef:
1364 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1365 Register Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1366 // FIXME: Add dead flags for physical and virtual registers defined.
1367 // For now, mark physical register defs as implicit to help fast
1368 // regalloc. This makes inline asm look a lot like calls.
1369 MIB.addReg(Reg, RegState::Define | getImplRegState(Reg.isPhysical()));
1370 }
1371 break;
1372 case InlineAsm::Kind::RegDefEarlyClobber:
1373 case InlineAsm::Kind::Clobber:
1374 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1375 Register Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1376 MIB.addReg(Reg, RegState::Define | RegState::EarlyClobber |
1377 getImplRegState(Reg.isPhysical()));
1378 ECRegs.push_back(Reg);
1379 }
1380 break;
1381 case InlineAsm::Kind::RegUse: // Use of register.
1382 case InlineAsm::Kind::Imm: // Immediate.
1383 case InlineAsm::Kind::Mem: // Non-function addressing mode.
1384 // The addressing mode has been selected, just add all of the
1385 // operands to the machine instruction.
1386 for (unsigned j = 0; j != NumVals; ++j, ++i)
1387 AddOperand(MIB, Node->getOperand(i), 0, nullptr, VRBaseMap,
1388 /*IsDebug=*/false, IsClone, IsCloned);
1389
1390 // Manually set isTied bits.
1391 if (F.isRegUseKind()) {
1392 unsigned DefGroup;
1393 if (F.isUseOperandTiedToDef(DefGroup)) {
1394 unsigned DefIdx = GroupIdx[DefGroup] + 1;
1395 unsigned UseIdx = GroupIdx.back() + 1;
1396 for (unsigned j = 0; j != NumVals; ++j)
1397 MIB->tieOperands(DefIdx + j, UseIdx + j);
1398 }
1399 }
1400 break;
1401 case InlineAsm::Kind::Func: // Function addressing mode.
1402 for (unsigned j = 0; j != NumVals; ++j, ++i) {
1403 SDValue Op = Node->getOperand(i);
1404 AddOperand(MIB, Op, 0, nullptr, VRBaseMap,
1405 /*IsDebug=*/false, IsClone, IsCloned);
1406
1407 // Adjust Target Flags for function reference.
1408 if (auto *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
1409 unsigned NewFlags =
1410 MF->getSubtarget().classifyGlobalFunctionReference(
1411 TGA->getGlobal());
1412 unsigned LastIdx = MIB.getInstr()->getNumOperands() - 1;
1413 MIB.getInstr()->getOperand(LastIdx).setTargetFlags(NewFlags);
1414 }
1415 }
1416 }
1417 }
1418
1419 // Add rounding control registers as implicit def for inline asm.
1420 if (MF->getFunction().hasFnAttribute(Attribute::StrictFP)) {
1421 ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
1422 for (MCPhysReg Reg : RCRegs)
1423 MIB.addReg(Reg, RegState::ImplicitDefine);
1424 }
1425
1426 // GCC inline assembly allows input operands to also be early-clobber
1427 // output operands (so long as the operand is written only after it's
1428 // used), but this does not match the semantics of our early-clobber flag.
1429 // If an early-clobber operand register is also an input operand register,
1430 // then remove the early-clobber flag.
1431 for (Register Reg : ECRegs) {
1432 if (MIB->readsRegister(Reg, TRI)) {
1433 MachineOperand *MO =
1434 MIB->findRegisterDefOperand(Reg, TRI, false, false);
1435 assert(MO && "No def operand for clobbered register?");
1436 MO->setIsEarlyClobber(false);
1437 }
1438 }
1439
1440 // Get the mdnode from the asm if it exists and add it to the instruction.
1441 SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
1442 const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
1443 if (MD)
1444 MIB.addMetadata(MD);
1445
1446 MBB->insert(InsertPos, MIB);
1447 break;
1448 }
1449 }
1450}
1451
1452/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
1453/// at the given position in the given block.
1454InstrEmitter::InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb,
1455 MachineBasicBlock::iterator insertpos)
1456 : MF(mbb->getParent()), MRI(&MF->getRegInfo()),
1457 TII(MF->getSubtarget().getInstrInfo()),
1458 TRI(MF->getSubtarget().getRegisterInfo()),
1459 TLI(MF->getSubtarget().getTargetLowering()), MBB(mbb),
1460 InsertPos(insertpos) {
1461 EmitDebugInstrRefs = mbb->getParent()->useDebugInstrRef();
1462}
unsigned SubReg
MachineInstrBuilder MachineInstrBuilder & DefMI
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const Function * getParent(const Value *V)
This file contains constants used for implementing Dwarf debug support.
IRTranslator LLVM IR MI
static bool isConvergenceCtrlMachineOp(SDValue Op)
MachineOperand GetMOForConstDbgOp(const SDDbgOperand &Op)
const unsigned MinRCSize
MinRCSize - Smallest register class we allow when constraining virtual registers.
static unsigned countOperands(SDNode *Node, unsigned NumExpUses, unsigned &NumImpUses)
countOperands - The inputs to target nodes have any actual inputs first, followed by an optional chai...
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
This file describes how to lower LLVM code to machine code.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
This is the shared class of boolean and integer constants.
Definition Constants.h:87
uint64_t getZExtValue() const
DWARF expression.
static LLVM_ABI DIExpression * append(const DIExpression *Expr, ArrayRef< uint64_t > Ops)
Append the opcodes Ops to DIExpr.
LLVM_ABI std::pair< DIExpression *, const ConstantInt * > constantFold(const ConstantInt *CI)
Try to shorten an expression with an initial constant operand.
static LLVM_ABI const DIExpression * convertToVariadicExpression(const DIExpression *Expr)
If Expr is a non-variadic expression (i.e.
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
Base class for variables.
A debug info location.
Definition DebugLoc.h:124
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:174
iterator end()
Definition DenseMap.h:81
MachineInstr * EmitDbgValue(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
EmitDbgValue - Generate machine instruction for a dbg_value node.
MachineInstr * EmitDbgInstrRef(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
Emit a dbg_value as a DBG_INSTR_REF.
SmallDenseMap< SDValue, Register, 16 > VRBaseMapType
MachineInstr * EmitDbgLabel(SDDbgLabel *SD)
Generate machine instruction for a dbg_label node.
MachineInstr * EmitDbgNoLocation(SDDbgValue *SD)
Emit a DBG_VALUE $noreg, indicating a variable has no location.
static unsigned CountResults(SDNode *Node)
CountResults - The results of target nodes have register or immediate operands first,...
MachineInstr * EmitDbgValueList(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
Emit a DBG_VALUE_LIST from the operands to SDDbgValue.
InstrEmitter(const TargetMachine &TM, MachineBasicBlock *mbb, MachineBasicBlock::iterator insertpos)
InstrEmitter - Construct an InstrEmitter and set it to start inserting at the given position in the g...
void AddDbgValueLocationOps(MachineInstrBuilder &MIB, const MCInstrDesc &DbgValDesc, ArrayRef< SDDbgOperand > Locations, VRBaseMapType &VRBaseMap)
MachineInstr * EmitDbgValueFromSingleOp(SDDbgValue *SD, VRBaseMapType &VRBaseMap)
Emit a DBG_VALUE from the operands to SDDbgValue.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
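For instance, the operand-constraint query above is how tied operands are typically discovered; a small sketch (the helper name tiedDefIndex is illustrative):

#include "llvm/MC/MCInstrDesc.h"

// Sketch: return the def operand index that use operand UseOpIdx is tied to,
// or -1 when the static description declares no TIED_TO constraint.
int tiedDefIndex(const llvm::MCInstrDesc &Desc, unsigned UseOpIdx) {
  return Desc.getOperandConstraint(UseOpIdx, llvm::MCOI::TIED_TO);
}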
Metadata node.
Definition Metadata.h:1078
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
Machine Value Type.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
const MachineInstrBuilder & addTargetIndex(unsigned Idx, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
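A minimal sketch of the fluent builder pattern these methods support; the opcode, operand order, and helper name are placeholders rather than a real target's encoding.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

// Sketch: emit Desc at InsertPos with a register def, a frame-index base and
// an immediate displacement, then hand back the underlying MachineInstr.
llvm::MachineInstr *emitFrameLoad(llvm::MachineBasicBlock &MBB,
                                  llvm::MachineBasicBlock::iterator InsertPos,
                                  const llvm::DebugLoc &DL,
                                  const llvm::MCInstrDesc &Desc,
                                  llvm::Register DstReg, int FrameIdx) {
  return llvm::BuildMI(MBB, InsertPos, DL, Desc)
      .addReg(DstReg, llvm::RegState::Define)
      .addFrameIndex(FrameIdx)
      .addImm(0)
      .getInstr();
}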
Representation of each machine instruction.
LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
unsigned getNumOperands() const
Returns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops before InsertBefore. Can untie/retie tied operands.
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
const MachineOperand & getOperand(unsigned i) const
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an index.
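As an illustration of how readsRegister, findRegisterDefOperand and the early-clobber flag combine, a sketch in the spirit of constraining a physical-register def that the same instruction also reads (not the emitter's exact code):

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

// Sketch: if MI both reads and defines PhysReg, mark the def as early-clobber
// so later passes see that the register is written before all reads finish.
void markEarlyClobberIfAlsoRead(llvm::MachineInstr &MI, llvm::Register PhysReg,
                                const llvm::TargetRegisterInfo *TRI) {
  if (!MI.readsRegister(PhysReg, TRI))
    return;
  if (llvm::MachineOperand *DefMO = MI.findRegisterDefOperand(PhysReg, TRI))
    DefMO->setIsEarlyClobber(true);
}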
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateFPImm(const ConstantFP *CFP)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateCImm(const ConstantInt *CI)
void setIsEarlyClobber(bool Val=true)
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateDbgInstrRef(unsigned InstrIdx, unsigned OpIdx)
void setTargetFlags(unsigned F)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
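A hedged sketch of how the constant-creating factories above might translate an IR constant into a machine operand; the helper name and the fallback behaviour are illustrative, not the file's exact rules.

#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/Constants.h"

// Sketch: integers keep their full width via CreateCImm, floats go through
// CreateFPImm, and anything unrecognised falls back to an immediate zero here.
llvm::MachineOperand constantToOperand(const llvm::Value *V) {
  if (const auto *CI = llvm::dyn_cast<llvm::ConstantInt>(V))
    return llvm::MachineOperand::CreateCImm(CI);
  if (const auto *CF = llvm::dyn_cast<llvm::ConstantFP>(V))
    return llvm::MachineOperand::CreateFPImm(CF);
  return llvm::MachineOperand::CreateImm(0); // placeholder fallback
}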
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
Holds the information from a dbg_label node through SDISel.
MDNode * getLabel() const
Returns the MDNode pointer for the label.
const DebugLoc & getDebugLoc() const
Returns the DebugLoc.
Holds the information for a single machine location through SDISel; either an SDNode,...
Register getVReg() const
Returns the Virtual Register for a VReg.
unsigned getResNo() const
Returns the ResNo for a register ref.
static SDDbgOperand fromConst(const Value *Const)
SDNode * getSDNode() const
Returns the SDNode* for a register ref.
@ VREG
Value is a virtual register.
@ FRAMEIX
Value is contents of a stack location.
@ SDNODE
Value is the result of an expression.
@ CONST
Value is a constant.
Kind getKind() const
Holds the information from a dbg_value node through SDISel.
const DebugLoc & getDebugLoc() const
Returns the DebugLoc.
DIVariable * getVariable() const
Returns the DIVariable pointer for the variable.
bool isInvalidated() const
ArrayRef< SDDbgOperand > getLocationOps() const
DIExpression * getExpression() const
Returns the DIExpression pointer for the expression.
bool isIndirect() const
Returns whether this is an indirect value.
void setIsEmitted()
setIsEmitted / isEmitted - Getter/Setter for flag indicating that this SDDbgValue has been emitted to...
bool isVariadic() const
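To illustrate the accessors above, a small sketch (hypothetical helper, using the private SDNodeDbgValue.h header) that checks whether any location operand still refers to an SDNode result:

#include "SDNodeDbgValue.h" // private header under lib/CodeGen/SelectionDAG

// Sketch: true if at least one location operand of SD refers to an SDNode
// result rather than a vreg, frame index or constant.
bool hasSDNodeLocation(const llvm::SDDbgValue *SD) {
  for (const llvm::SDDbgOperand &L : SD->getLocationOps())
    if (L.getKind() == llvm::SDDbgOperand::SDNODE)
      return true;
  return false;
}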
Represents one node in the SelectionDAG.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
bool isMachineOpcode() const
unsigned getMachineOpcode() const
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
static LLVM_ABI unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
Primary interface to the complete machine description for the target machine.
bool isAllocatable() const
Return true if this register class may be used to create virtual registers.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
LLVM Value Representation.
Definition Value.h:75
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
Definition CallingConv.h:60
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
Definition ISDOpcodes.h:256
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defined outside of the scope of this SelectionDAG.
Definition ISDOpcodes.h:225
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
Definition ISDOpcodes.h:48
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
Definition ISDOpcodes.h:219
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition ISDOpcodes.h:53
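For example, the register behind a CopyFromReg node sits in operand 1 (operand 0 is the chain); a small sketch, with a hypothetical helper name, that extracts it via the RegisterSDNode:

#include <cassert>
#include "llvm/CodeGen/SelectionDAGNodes.h"

// Sketch: extract the source register of an ISD::CopyFromReg node.
llvm::Register copyFromRegSource(const llvm::SDNode *N) {
  assert(N->getOpcode() == llvm::ISD::CopyFromReg && "expected CopyFromReg");
  return llvm::cast<llvm::RegisterSDNode>(N->getOperand(1))->getReg();
}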
@ Define
Register definition.
@ EarlyClobber
Register definition happens before uses.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
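A small sketch of these range helpers in the operand-scanning style they are typically used for here (the helper name is illustrative):

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstr.h"

// Sketch: check that every operand of MI is a register operand.
bool allOperandsAreRegisters(const llvm::MachineInstr &MI) {
  return llvm::all_of(MI.operands(), [](const llvm::MachineOperand &MO) {
    return MO.isReg();
  });
}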
unsigned getImplRegState(bool B)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
unsigned getDebugRegState(bool B)
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
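These bool-to-flag helpers compose with the flags argument of addReg, roughly as in this sketch (hypothetical helper name):

#include "llvm/CodeGen/MachineInstrBuilder.h"

// Sketch: add a use operand whose kill/implicit/debug bits come from the
// helpers above; a def operand would use getDefRegState(true) instead.
void addUseOperand(llvm::MachineInstrBuilder &MIB, llvm::Register Reg,
                   bool IsKill, bool IsImplicit, bool IsDebug) {
  MIB.addReg(Reg, llvm::getKillRegState(IsKill) |
                      llvm::getImplRegState(IsImplicit) |
                      llvm::getDebugRegState(IsDebug));
}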
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559