InlineAsmLowering.cpp
//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "inline-asm-lowering"

using namespace llvm;

void InlineAsmLowering::anchor() {}

namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

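/// ExtraFlags - Accumulates the "extra info" immediate operand of the
/// INLINEASM instruction: has-side-effects, alignstack, may-unwind, convergent
/// and asm-dialect bits taken from the call, plus MayLoad/MayStore bits
/// derived from the individual constraints via update().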
class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (IA->canThrow())
      Flags |= InlineAsm::Extra_MayUnwind;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it. Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // namespace

/// Assign virtual/physical registers for the specified register operand.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}

static void computeConstraintToUse(const TargetLowering *TLI,
                                   TargetLowering::AsmOperandInfo &OpInfo) {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  } else {
    TargetLowering::ConstraintGroup G = TLI->getConstraintPreferences(OpInfo);
    if (G.empty())
      return;
    // FIXME: prefer immediate constraints if the target allows it
    unsigned BestIdx = 0;
    for (const unsigned E = G.size();
         BestIdx < E && (G[BestIdx].second == TargetLowering::C_Other ||
                         G[BestIdx].second == TargetLowering::C_Immediate);
         ++BestIdx)
      ;
    OpInfo.ConstraintCode = G[BestIdx].first;
    OpInfo.ConstraintType = G[BestIdx].second;
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels). For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *Val = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

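// Note on operand layout: after the asm string and the extra-info immediate,
// the INLINEASM operands come in groups of one InlineAsm::Flag immediate
// followed by that flag's register operands. getNumOpRegs reads the register
// count out of the flag word at OpIdx so callers can step from one group to
// the next.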
static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
  const InlineAsm::Flag F(I.getOperand(OpIdx).getImm());
  return F.getNumOperandRegisters();
}

static bool buildAnyextOrCopy(Register Dst, Register Src,
                              MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI =
      MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  auto SrcTy = MRI->getType(Src);
  if (!SrcTy.isValid()) {
    LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
    return false;
  }
  unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
  unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);

  if (DstSize < SrcSize) {
    LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
    return false;
  }

  // Attempt to anyext small scalar sources.
  if (DstSize > SrcSize) {
    if (!SrcTy.isScalar()) {
      LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of "
                           "destination register class\n");
      return false;
    }
    Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
  }

  MIRBuilder.buildCopy(Dst, Src);
  return true;
}
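
// Main entry point, called from the IR translator. The INLINEASM instruction
// built here carries the asm string, the extra-info immediate assembled by
// ExtraFlags, and then one flag immediate plus its register operands per
// constraint. As a rough illustration (the operand spelling below is
// simplified, not exact MIR syntax), an IR call such as
//
//   %res = call i32 asm "mov $0, $1", "=r,r"(i32 %in)
//
// becomes an instruction of the form
//
//   INLINEASM &"mov $0, $1", <extra-info>, <regdef flag>, def %out,
//             <reguse flag>, %in_copy
//
// followed by a COPY of %out into the virtual register(s) for %res.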

bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      assert(OpInfo.Type != InlineAsm::isLabel &&
             "GlobalISel currently doesn't support callbr");
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new sideeffects
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().data())
                  .addImm(ExtraInfo.get());

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        InlineAsm::Flag Flag(InlineAsm::Kind::Mem, 1);
        Flag.setMemConstraint(ConstraintID);
        Inst.addImm(Flag);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass / C_Other).
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
               OpInfo.ConstraintType == TargetLowering::C_Other);

        // Find a register that we can use.
        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        InlineAsm::Flag Flag(OpInfo.isEarlyClobber
                                 ? InlineAsm::Kind::RegDefEarlyClobber
                                 : InlineAsm::Kind::RegDef,
                             OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag.setRegClass(RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define |
                               getImplRegState(Reg.isPhysical()) |
                               getEarlyClobberRegState(OpInfo.isEarlyClobber));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        const InlineAsm::Flag MatchedOperandFlag(
            Inst->getOperand(InstFlagIdx).getImm());
        if (MatchedOperandFlag.isMemKind()) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!MatchedOperandFlag.isRegDefKind() &&
            !MatchedOperandFlag.isRegDefEarlyClobberKind()) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        InlineAsm::Flag UseFlag(InlineAsm::Kind::RegUse, 1);
        UseFlag.setMatchingOp(DefIdx);
        Inst.addImm(UseFlag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        const unsigned OpFlags =
            InlineAsm::Flag(InlineAsm::Kind::Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
        OpFlags.setMemConstraint(ConstraintID);
        Inst.addImm(OpFlags);

        if (OpInfo.isIndirect) {
          // already indirect
          ArrayRef<Register> SourceRegs =
              GetOrCreateVRegs(*OpInfo.CallOperandVal);
          if (SourceRegs.size() != 1) {
            LLVM_DEBUG(dbgs() << "Expected the memory input to fit into a "
                                 "single virtual register "
                                 "for constraint '"
                              << OpInfo.ConstraintCode << "'\n");
            return false;
          }
          Inst.addReg(SourceRegs[0]);
          break;
        }

        // Needs to be made indirect. Store the value on the stack and use
        // a pointer to it.
        Value *OpVal = OpInfo.CallOperandVal;
        TypeSize Bytes = DL.getTypeStoreSize(OpVal->getType());
        Align Alignment = DL.getPrefTypeAlign(OpVal->getType());
        int FrameIdx =
            MF.getFrameInfo().CreateStackObject(Bytes, Alignment, false);

        unsigned AddrSpace = DL.getAllocaAddrSpace();
        LLT FramePtrTy =
            LLT::pointer(AddrSpace, DL.getPointerSizeInBits(AddrSpace));
        auto Ptr = MIRBuilder.buildFrameIndex(FramePtrTy, FrameIdx).getReg(0);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        if (SourceRegs.size() != 1) {
          LLVM_DEBUG(dbgs() << "Expected the memory input to fit into a single "
                               "virtual register "
                               "for constraint '"
                            << OpInfo.ConstraintCode << "'\n");
          return false;
        }
        MIRBuilder.buildStore(SourceRegs[0], Ptr,
                              MachinePointerInfo::getFixedStack(MF, FrameIdx),
                              Alignment);
        Inst.addReg(Ptr);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      InlineAsm::Flag Flag(InlineAsm::Kind::RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag.setRegClass(RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      const unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag = InlineAsm::Flag(InlineAsm::Kind::Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
    auto *Token = Bundle->Inputs[0].get();
    ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
    assert(SourceRegs.size() == 1 &&
           "Expected the control token to fit into a single virtual register");
    Inst.addUse(SourceRegs[0], RegState::Implicit);
  }

  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // Add rounding control registers as implicit def for inline asm.
  if (MF.getFunction().hasFnAttribute(Attribute::StrictFP)) {
    ArrayRef<MCPhysReg> RCRegs = TLI->getRoundingControlRegisters();
    for (MCPhysReg Reg : RCRegs)
      Inst.addReg(Reg, RegState::ImplicitDefine);
  }

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      LLT ResTy = MRI->getType(ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }

      break;
    }
    case TargetLowering::C_Other:
    case TargetLowering::C_Immediate:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Address:
      break; // Silence warning.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}

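// Default handling for immediate-like operands: this base implementation only
// understands the generic 's', 'i' and 'n' constraint letters. The hook is
// virtual (declared in InlineAsmLowering.h), so targets can override it to
// lower their own single-letter immediate constraints into MachineOperands.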
bool InlineAsmLowering::lowerAsmOperandForConstraint(
    Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
    MachineIRBuilder &MIRBuilder) const {
  if (Constraint.size() > 1)
    return false;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    return false;
  case 's': // Integer immediate not known at compile time
    if (const auto *GV = dyn_cast<GlobalValue>(Val)) {
      Ops.push_back(MachineOperand::CreateGA(GV, /*Offset=*/0));
      return true;
    }
    return false;
  case 'i': // Simple Integer or Relocatable Constant
    if (const auto *GV = dyn_cast<GlobalValue>(Val)) {
      Ops.push_back(MachineOperand::CreateGA(GV, /*Offset=*/0));
      return true;
    }
    [[fallthrough]];
  case 'n': // immediate integer with a known value.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      assert(CI->getBitWidth() <= 64 &&
             "expected immediate to fit into 64-bits");
      // Boolean constants should be zero-extended, others are sign-extended
      bool IsBool = CI->getBitWidth() == 1;
      int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
      Ops.push_back(MachineOperand::CreateImm(ExtVal));
      return true;
    }
    return false;
  }
}
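
// Call-site sketch (illustrative only, not part of this file): the GlobalISel
// IR translator reaches this lowering roughly as
//
//   const InlineAsmLowering *ALI = MF.getSubtarget().getInlineAsmLowering();
//   ALI->lowerInlineAsm(MIRBuilder, CB,
//                       [&](const Value &V) { return getOrCreateVRegs(V); });
//
// where the callback materializes the virtual registers for an IR Value.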