LLVM 23.0.0git
BPFISelLowering.cpp
Go to the documentation of this file.
1//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that BPF uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "BPFISelLowering.h"
15#include "BPF.h"
16#include "BPFSubtarget.h"
25#include "llvm/IR/DIBuilder.h"
28#include "llvm/IR/Module.h"
29#include "llvm/Support/Debug.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE "bpf-lower"
37
38static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
39 cl::Hidden, cl::init(false),
40 cl::desc("Expand memcpy into load/store pairs in order"));
41
43 "bpf-min-jump-table-entries", cl::init(13), cl::Hidden,
44 cl::desc("Set minimum number of entries to use a jump table on BPF"));
45
46static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
47 SDValue Val = {}) {
48 std::string Str;
49 if (Val) {
50 raw_string_ostream OS(Str);
51 Val->print(OS);
52 OS << ' ';
53 }
56 MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
57}
58
60 const BPFSubtarget &STI)
61 : TargetLowering(TM, STI) {
62
63 // Set up the register classes.
64 addRegisterClass(MVT::i64, &BPF::GPRRegClass);
65 if (STI.getHasAlu32())
66 addRegisterClass(MVT::i32, &BPF::GPR32RegClass);
67
68 // Compute derived properties from the register classes
70
72
76
77 if (!STI.hasGotox())
79
81
83 if (STI.hasGotox())
85
89
90 // Set unsupported atomic operations as Custom so
91 // we can emit better error messages than fatal error
92 // from selectiondag.
93 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
94 if (VT == MVT::i32) {
95 if (STI.getHasAlu32())
96 continue;
97 } else {
99 }
100
106 }
107
108 for (auto VT : {MVT::i32, MVT::i64}) {
111 }
112
113 for (auto VT : { MVT::i32, MVT::i64 }) {
114 if (VT == MVT::i32 && !STI.getHasAlu32())
115 continue;
116
119 if (!STI.hasSdivSmod()) {
122 }
137
141 }
142
143 if (STI.getHasAlu32()) {
146 STI.getHasJmp32() ? Custom : Promote);
147 }
148
150 if (!STI.hasMovsx()) {
154 }
155
156 // Extended load operations for i1 types must be promoted
157 for (MVT VT : MVT::integer_valuetypes()) {
161
162 if (!STI.hasLdsx()) {
164 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
165 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
166 }
167 }
168
172
173 // Function alignments
176
178 // LLVM generic code will try to expand memcpy into load/store pairs at this
179 // stage which is before quite a few IR optimization passes, therefore the
180 // loads and stores could potentially be moved apart from each other which
181 // will cause trouble to memcpy pattern matcher inside kernel eBPF JIT
182 // compilers.
183 //
184 // When -bpf-expand-memcpy-in-order specified, we want to defer the expand
185 // of memcpy to later stage in IR optimization pipeline so those load/store
186 // pairs won't be touched and could be kept in order. Hence, we set
187 // MaxStoresPerMem* to zero to disable the generic getMemcpyLoadsAndStores
188 // code path, and ask LLVM to use target expander EmitTargetCodeForMemcpy.
193 } else {
194 // inline memcpy() for kernel to see explicit copy
195 unsigned CommonMaxStores =
197
202 }
203
204 // CPU/Feature control
205 HasAlu32 = STI.getHasAlu32();
206 HasJmp32 = STI.getHasJmp32();
207 HasJmpExt = STI.getHasJmpExt();
208 HasMovsx = STI.hasMovsx();
209
210 AllowsMisalignedMemAccess = STI.getAllowsMisalignedMemAccess();
211}
212
215 unsigned *Fast) const {
216 // allows-misaligned-mem-access is disabled
217 if (!AllowsMisalignedMemAccess)
218 return false;
219
220 // only allow misalignment for simple value types
221 if (!VT.isSimple())
222 return false;
223
224 // always assume fast mode when misalignment is allowed
225 if (Fast)
226 *Fast = true;
227
228 return true;
229}
230
232 return false;
233}
234
235bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
236 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
237 return false;
238 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
239 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
240 return NumBits1 > NumBits2;
241}
242
243bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
244 if (!VT1.isInteger() || !VT2.isInteger())
245 return false;
246 unsigned NumBits1 = VT1.getSizeInBits();
247 unsigned NumBits2 = VT2.getSizeInBits();
248 return NumBits1 > NumBits2;
249}
250
251bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
252 if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
253 return false;
254 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
255 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
256 return NumBits1 == 32 && NumBits2 == 64;
257}
258
259bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
260 if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
261 return false;
262 unsigned NumBits1 = VT1.getSizeInBits();
263 unsigned NumBits2 = VT2.getSizeInBits();
264 return NumBits1 == 32 && NumBits2 == 64;
265}
266
267bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
268 EVT VT1 = Val.getValueType();
269 if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
270 MVT MT1 = VT1.getSimpleVT().SimpleTy;
271 MVT MT2 = VT2.getSimpleVT().SimpleTy;
272 if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
273 (MT2 == MVT::i32 || MT2 == MVT::i64))
274 return true;
275 }
276 return TargetLoweringBase::isZExtFree(Val, VT2);
277}
278
282
285 if (Constraint.size() == 1) {
286 switch (Constraint[0]) {
287 default:
288 break;
289 case 'w':
290 return C_RegisterClass;
291 }
292 }
293
294 return TargetLowering::getConstraintType(Constraint);
295}
296
297std::pair<unsigned, const TargetRegisterClass *>
299 StringRef Constraint,
300 MVT VT) const {
301 if (Constraint.size() == 1) {
302 // GCC Constraint Letters
303 switch (Constraint[0]) {
304 case 'r': // GENERAL_REGS
305 return std::make_pair(0U, &BPF::GPRRegClass);
306 case 'w':
307 if (HasAlu32)
308 return std::make_pair(0U, &BPF::GPR32RegClass);
309 break;
310 default:
311 break;
312 }
313 }
314
316}
317
318void BPFTargetLowering::ReplaceNodeResults(
320 const char *Msg;
321 uint32_t Opcode = N->getOpcode();
322 switch (Opcode) {
323 default:
324 report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
329 case ISD::ATOMIC_SWAP:
331 if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
332 Msg = "unsupported atomic operation, please use 32/64 bit version";
333 else
334 Msg = "unsupported atomic operation, please use 64 bit version";
335 break;
336 case ISD::ATOMIC_LOAD:
338 return;
339 }
340
341 SDLoc DL(N);
342 // We'll still produce a fatal error downstream, but this diagnostic is more
343 // user-friendly.
344 fail(DL, DAG, Msg);
345}
346
348 switch (Op.getOpcode()) {
349 default:
350 report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
351 case ISD::BR_CC:
352 return LowerBR_CC(Op, DAG);
353 case ISD::JumpTable:
354 return LowerJumpTable(Op, DAG);
356 return LowerGlobalAddress(Op, DAG);
358 return LowerConstantPool(Op, DAG);
360 return LowerBlockAddress(Op, DAG);
361 case ISD::SELECT_CC:
362 return LowerSELECT_CC(Op, DAG);
363 case ISD::SDIV:
364 case ISD::SREM:
365 return LowerSDIVSREM(Op, DAG);
366 case ISD::SHL_PARTS:
367 case ISD::SRL_PARTS:
368 case ISD::SRA_PARTS:
369 return LowerShiftParts(Op, DAG);
371 return LowerDYNAMIC_STACKALLOC(Op, DAG);
372 case ISD::ATOMIC_LOAD:
374 return LowerATOMIC_LOAD_STORE(Op, DAG);
375 case ISD::TRAP:
376 return LowerTRAP(Op, DAG);
377 }
378}
379
380// Calling Convention Implementation
381#include "BPFGenCallingConv.inc"
382
383SDValue BPFTargetLowering::LowerFormalArguments(
384 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
385 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
386 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
387 switch (CallConv) {
388 default:
389 report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
390 case CallingConv::C:
392 break;
393 }
394
397
398 // Assign locations to all of the incoming arguments.
400 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
401 CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);
402
403 bool HasMemArgs = false;
404 for (size_t I = 0; I < ArgLocs.size(); ++I) {
405 auto &VA = ArgLocs[I];
406
407 if (VA.isRegLoc()) {
408 // Arguments passed in registers
409 EVT RegVT = VA.getLocVT();
410 MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
411 switch (SimpleTy) {
412 default: {
413 std::string Str;
414 {
415 raw_string_ostream OS(Str);
416 RegVT.print(OS);
417 }
418 report_fatal_error("unhandled argument type: " + Twine(Str));
419 }
420 case MVT::i32:
421 case MVT::i64:
422 Register VReg = RegInfo.createVirtualRegister(
423 SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
424 RegInfo.addLiveIn(VA.getLocReg(), VReg);
425 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
426
427 // If this is an value that has been promoted to wider types, insert an
428 // assert[sz]ext to capture this, then truncate to the right size.
429 if (VA.getLocInfo() == CCValAssign::SExt)
430 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
431 DAG.getValueType(VA.getValVT()));
432 else if (VA.getLocInfo() == CCValAssign::ZExt)
433 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
434 DAG.getValueType(VA.getValVT()));
435
436 if (VA.getLocInfo() != CCValAssign::Full)
437 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
438
439 InVals.push_back(ArgValue);
440
441 break;
442 }
443 } else {
444 if (VA.isMemLoc())
445 HasMemArgs = true;
446 else
447 report_fatal_error("unhandled argument location");
448 InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
449 }
450 }
451 if (HasMemArgs)
452 fail(DL, DAG, "stack arguments are not supported");
453 if (IsVarArg)
454 fail(DL, DAG, "variadic functions are not supported");
455 return Chain;
456}
457
458const size_t BPFTargetLowering::MaxArgs = 5;
459
460static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask,
461 MCRegister Reg) {
462 for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
463 RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
464}
465
467 MachineFunction &MF,
468 const uint32_t *BaseRegMask) {
469 uint32_t *RegMask = MF.allocateRegMask();
470 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
471 memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);
472 return RegMask;
473}
474
475SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
476 SmallVectorImpl<SDValue> &InVals) const {
477 SelectionDAG &DAG = CLI.DAG;
478 auto &Outs = CLI.Outs;
479 auto &OutVals = CLI.OutVals;
480 auto &Ins = CLI.Ins;
481 SDValue Chain = CLI.Chain;
482 SDValue Callee = CLI.Callee;
483 bool &IsTailCall = CLI.IsTailCall;
484 CallingConv::ID CallConv = CLI.CallConv;
485 bool IsVarArg = CLI.IsVarArg;
486 MachineFunction &MF = DAG.getMachineFunction();
487
488 // BPF target does not support tail call optimization.
489 IsTailCall = false;
490
491 switch (CallConv) {
492 default:
493 report_fatal_error("unsupported calling convention: " + Twine(CallConv));
495 case CallingConv::C:
496 break;
497 }
498
499 // Analyze operands of the call, assigning locations to each operand.
501 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
502
503 CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);
504
505 unsigned NumBytes = CCInfo.getStackSize();
506
507 if (Outs.size() > MaxArgs)
508 fail(CLI.DL, DAG, "too many arguments", Callee);
509
510 for (auto &Arg : Outs) {
511 ISD::ArgFlagsTy Flags = Arg.Flags;
512 if (!Flags.isByVal())
513 continue;
514 fail(CLI.DL, DAG, "pass by value not supported", Callee);
515 break;
516 }
517
518 auto PtrVT = getPointerTy(MF.getDataLayout());
519 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
520
521 SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;
522
523 // Walk arg assignments
524 for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {
525 CCValAssign &VA = ArgLocs[i];
526 SDValue &Arg = OutVals[i];
527
528 // Promote the value if needed.
529 switch (VA.getLocInfo()) {
530 default:
531 report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
533 break;
535 Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
536 break;
538 Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
539 break;
541 Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
542 break;
543 }
544
545 // Push arguments into RegsToPass vector
546 if (VA.isRegLoc())
547 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
548 else
549 report_fatal_error("stack arguments are not supported");
550 }
551
552 SDValue InGlue;
553
554 // Build a sequence of copy-to-reg nodes chained together with token chain and
555 // flag operands which copy the outgoing args into registers. The InGlue in
556 // necessary since all emitted instructions must be stuck together.
557 for (auto &Reg : RegsToPass) {
558 Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
559 InGlue = Chain.getValue(1);
560 }
561
562 // If the callee is a GlobalAddress node (quite common, every direct call is)
563 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
564 // Likewise ExternalSymbol -> TargetExternalSymbol.
565 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
566 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
567 G->getOffset(), 0);
568 } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
569 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
570 StringRef Sym = E->getSymbol();
571 if (Sym != BPF_TRAP && Sym != "__multi3" && Sym != "__divti3" &&
572 Sym != "__modti3" && Sym != "__udivti3" && Sym != "__umodti3")
573 fail(
574 CLI.DL, DAG,
575 Twine("A call to built-in function '" + Sym + "' is not supported."));
576 }
577
578 // Returns a chain & a flag for retval copy to use.
579 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
581 Ops.push_back(Chain);
582 Ops.push_back(Callee);
583
584 // Add argument registers to the end of the list so that they are
585 // known live into the call.
586 for (auto &Reg : RegsToPass)
587 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
588
589 bool HasFastCall =
590 (CLI.CB && isa<CallInst>(CLI.CB) && CLI.CB->hasFnAttr("bpf_fastcall"));
591 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
592 if (HasFastCall) {
593 uint32_t *RegMask = regMaskFromTemplate(
594 TRI, MF, TRI->getCallPreservedMask(MF, CallingConv::PreserveAll));
595 for (auto const &RegPair : RegsToPass)
596 resetRegMaskBit(TRI, RegMask, RegPair.first);
597 if (!CLI.CB->getType()->isVoidTy())
598 resetRegMaskBit(TRI, RegMask, BPF::R0);
599 Ops.push_back(DAG.getRegisterMask(RegMask));
600 } else {
601 Ops.push_back(
602 DAG.getRegisterMask(TRI->getCallPreservedMask(MF, CLI.CallConv)));
603 }
604
605 if (InGlue.getNode())
606 Ops.push_back(InGlue);
607
608 Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
609 InGlue = Chain.getValue(1);
610
611 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
612
613 // Create the CALLSEQ_END node.
614 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
615 InGlue = Chain.getValue(1);
616
617 // Handle result values, copying them out of physregs into vregs that we
618 // return.
619 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
620 InVals);
621}
622
624BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
625 bool IsVarArg,
627 const SmallVectorImpl<SDValue> &OutVals,
628 const SDLoc &DL, SelectionDAG &DAG) const {
629 unsigned Opc = BPFISD::RET_GLUE;
630
631 // CCValAssign - represent the assignment of the return value to a location
633 MachineFunction &MF = DAG.getMachineFunction();
634
635 // CCState - Info about the registers and stack slot.
636 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
637
638 // Analize return values.
639 CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
640
641 SDValue Glue;
642 SmallVector<SDValue, 4> RetOps(1, Chain);
643
644 // Copy the result values into the output registers.
645 for (size_t i = 0; i != RVLocs.size(); ++i) {
646 CCValAssign &VA = RVLocs[i];
647 if (!VA.isRegLoc())
648 report_fatal_error("stack return values are not supported");
649
650 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);
651
652 // Guarantee that all emitted copies are stuck together,
653 // avoiding something bad.
654 Glue = Chain.getValue(1);
655 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
656 }
657
658 RetOps[0] = Chain; // Update chain.
659
660 // Add the glue if we have it.
661 if (Glue.getNode())
662 RetOps.push_back(Glue);
663
664 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
665}
666
667SDValue BPFTargetLowering::LowerCallResult(
668 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
669 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
670 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
671
672 MachineFunction &MF = DAG.getMachineFunction();
673 // Assign locations to each value returned by this call.
675 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
676
677 CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
678
679 // Copy all of the result registers out of their specified physreg.
680 for (auto &Val : RVLocs) {
681 Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
682 Val.getValVT(), InGlue).getValue(1);
683 InGlue = Chain.getValue(2);
684 InVals.push_back(Chain.getValue(0));
685 }
686
687 return Chain;
688}
689
691 switch (CC) {
692 default:
693 break;
694 case ISD::SETULT:
695 case ISD::SETULE:
696 case ISD::SETLT:
697 case ISD::SETLE:
699 std::swap(LHS, RHS);
700 break;
701 }
702}
703
704SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
705 SDLoc DL(Op);
706 fail(DL, DAG,
707 "unsupported signed division, please convert to unsigned div/mod.");
708 return DAG.getUNDEF(Op->getValueType(0));
709}
710
711SDValue BPFTargetLowering::LowerShiftParts(SDValue Op,
712 SelectionDAG &DAG) const {
713 SDValue Lo, Hi;
714 expandShiftParts(Op.getNode(), Lo, Hi, DAG);
715 return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
716}
717
718SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
719 SelectionDAG &DAG) const {
720 SDLoc DL(Op);
721 fail(DL, DAG, "unsupported dynamic stack allocation");
722 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
723 return DAG.getMergeValues(Ops, SDLoc());
724}
725
726SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
727 SDValue Chain = Op.getOperand(0);
728 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
729 SDValue LHS = Op.getOperand(2);
730 SDValue RHS = Op.getOperand(3);
731 SDValue Dest = Op.getOperand(4);
732 SDLoc DL(Op);
733
734 if (!getHasJmpExt())
735 NegateCC(LHS, RHS, CC);
736
737 return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
738 DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
739}
740
741SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
742 SDValue LHS = Op.getOperand(0);
743 SDValue RHS = Op.getOperand(1);
744 SDValue TrueV = Op.getOperand(2);
745 SDValue FalseV = Op.getOperand(3);
746 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
747 SDLoc DL(Op);
748
749 if (!getHasJmpExt())
750 NegateCC(LHS, RHS, CC);
751
752 SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
753 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
754
755 return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);
756}
757
758SDValue BPFTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
759 SelectionDAG &DAG) const {
760 SDNode *N = Op.getNode();
761 SDLoc DL(N);
762
763 if (cast<AtomicSDNode>(N)->getMergedOrdering() ==
765 fail(DL, DAG,
766 "sequentially consistent (seq_cst) "
767 "atomic load/store is not supported");
768
769 return Op;
770}
771
773 if (auto *Fn = M->getFunction(BPF_TRAP))
774 return Fn;
775
776 FunctionType *FT = FunctionType::get(Type::getVoidTy(M->getContext()), false);
777 Function *NewF =
779 NewF->setDSOLocal(true);
781 NewF->setSection(".ksyms");
782
783 if (M->debug_compile_units().empty())
784 return NewF;
785
786 DIBuilder DBuilder(*M);
787 DITypeArray ParamTypes =
788 DBuilder.getOrCreateTypeArray({nullptr /*void return*/});
789 DISubroutineType *FuncType = DBuilder.createSubroutineType(ParamTypes);
790 DICompileUnit *CU = *M->debug_compile_units_begin();
791 DISubprogram *SP =
792 DBuilder.createFunction(CU, BPF_TRAP, BPF_TRAP, nullptr, 0, FuncType, 0,
793 DINode::FlagZero, DISubprogram::SPFlagZero);
794 NewF->setSubprogram(SP);
795 return NewF;
796}
797
798SDValue BPFTargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
799 MachineFunction &MF = DAG.getMachineFunction();
800 TargetLowering::CallLoweringInfo CLI(DAG);
802 SDNode *N = Op.getNode();
803 SDLoc DL(N);
804
806 auto PtrVT = getPointerTy(MF.getDataLayout());
807 CLI.Callee = DAG.getTargetGlobalAddress(Fn, DL, PtrVT);
808 CLI.Chain = N->getOperand(0);
809 CLI.IsTailCall = false;
811 CLI.IsVarArg = false;
812 CLI.DL = std::move(DL);
813 CLI.NoMerge = false;
814 CLI.DoesNotReturn = true;
815 return LowerCall(CLI, InVals);
816}
817
818SDValue BPFTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
819 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
820 return getAddr(N, DAG);
821}
822
824 SelectionDAG &DAG, unsigned Flags) {
825 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
826 N->getOffset(), Flags);
827}
828
830 SelectionDAG &DAG, unsigned Flags) {
831 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
832}
833
834template <class NodeTy>
835SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
836 unsigned Flags) const {
837 SDLoc DL(N);
838
839 SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);
840
841 return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
842}
843
844SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
845 SelectionDAG &DAG) const {
846 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
847 if (N->getOffset() != 0)
848 report_fatal_error("invalid offset for global address: " +
849 Twine(N->getOffset()));
850
851 const GlobalValue *GVal = N->getGlobal();
852 SDLoc DL(Op);
853
854 // Wrap it in a TargetGlobalAddress
855 SDValue Addr = DAG.getTargetGlobalAddress(GVal, DL, MVT::i64);
856
857 // Emit pseudo instruction
858 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
859}
860
861SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
862 SelectionDAG &DAG) const {
863 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
864
865 return getAddr(N, DAG);
866}
867
868SDValue BPFTargetLowering::LowerBlockAddress(SDValue Op,
869 SelectionDAG &DAG) const {
870 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
871 SDLoc DL(Op);
872
873 // Wrap it in a TargetBlockAddress
874 SDValue Addr = DAG.getTargetBlockAddress(BA, MVT::i64);
875
876 // Emit pseudo instruction
877 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
878}
879
880unsigned
881BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
882 unsigned Reg, bool isSigned) const {
883 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
884 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
885 int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
886 MachineFunction *F = BB->getParent();
887 DebugLoc DL = MI.getDebugLoc();
888
889 MachineRegisterInfo &RegInfo = F->getRegInfo();
890
891 if (!isSigned) {
892 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
893 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
894 return PromotedReg0;
895 }
896 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
897 Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
898 Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
899 if (HasMovsx) {
900 BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg0).addReg(Reg);
901 } else {
902 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
903 BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
904 .addReg(PromotedReg0).addImm(32);
905 BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
906 .addReg(PromotedReg1).addImm(32);
907 }
908
909 return PromotedReg2;
910}
911
913BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
915 const {
916 MachineFunction *MF = MI.getParent()->getParent();
917 MachineRegisterInfo &MRI = MF->getRegInfo();
918 MachineInstrBuilder MIB(*MF, MI);
919 unsigned ScratchReg;
920
921 // This function does custom insertion during lowering BPFISD::MEMCPY which
922 // only has two register operands from memcpy semantics, the copy source
923 // address and the copy destination address.
924 //
925 // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
926 // a third scratch register to serve as the destination register of load and
927 // source register of store.
928 //
929 // The scratch register here is with the Define | Dead | EarlyClobber flags.
930 // The EarlyClobber flag has the semantic property that the operand it is
931 // attached to is clobbered before the rest of the inputs are read. Hence it
932 // must be unique among the operands to the instruction. The Define flag is
933 // needed to coerce the machine verifier that an Undef value isn't a problem
934 // as we anyway is loading memory into it. The Dead flag is needed as the
935 // value in scratch isn't supposed to be used by any other instruction.
936 ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
937 MIB.addReg(ScratchReg,
939
940 return BB;
941}
942
// Expand the LDIMM64 pseudo instruction. Three cases are handled:
//   1. A block address: materialize it through a single-entry jump table
//      (LD_imm64 of the jump-table address followed by an LDD of entry 0).
//   2. A private constant array whose elements are all block addresses:
//      build a jump table over the target blocks and LD_imm64 its index.
//   3. Any other global: plain LD_imm64 of the global address.
MachineBasicBlock *BPFTargetLowering::EmitInstrWithCustomInserterLDimm64(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const BPFInstrInfo *TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  // Build address-taken map for global variables and block addresses:
  // IR basic block -> machine basic block, for every block whose address
  // is taken anywhere in the function.
  DenseMap<const BasicBlock *, MachineBasicBlock *> AddressTakenBBs;
  for (MachineBasicBlock &MBB : *MF) {
    if (const BasicBlock *BB = MBB.getBasicBlock())
      if (BB->hasAddressTaken())
        AddressTakenBBs[BB] = &MBB;
  }

  // LDIMM64 operands: (0) destination register, (1) the address operand.
  MachineOperand &MO = MI.getOperand(1);
  assert(MO.isBlockAddress() || MO.isGlobal());

  Register ResultReg = MI.getOperand(0).getReg();
  Register TmpReg = RegInfo.createVirtualRegister(RC);

  std::vector<MachineBasicBlock *> Targets;
  unsigned JTI;

  if (MO.isBlockAddress()) {
    auto *BA = MO.getBlockAddress();
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);

    // Create a one-entry jump table holding the target block, then load
    // the block's address back out of it: LD_imm64 gets the table address,
    // LDD reads entry 0.
    Targets.push_back(TgtMBB);
    JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
              ->createJumpTableIndex(Targets);

    BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), TmpReg)
        .addJumpTableIndex(JTI);
    BuildMI(*BB, MI, DL, TII->get(BPF::LDD), ResultReg)
        .addReg(TmpReg)
        .addImm(0);
    MI.eraseFromParent();
    return BB;
  }

  // Helper: emit LD_imm64 with operand GlobalAddress or JumpTable, erase
  // the pseudo, and return the (unchanged) insertion block. Exactly one of
  // GV / JTI is meaningful per call. Note the parameter JTI intentionally
  // shadows the outer variable of the same name.
  auto emitLDImm64 = [&](const GlobalValue *GV = nullptr, unsigned JTI = -1) {
    auto MIB = BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), ResultReg);
    if (GV)
      MIB.addGlobalAddress(GV);
    else
      MIB.addJumpTableIndex(JTI);
    MI.eraseFromParent();
    return BB;
  };

  // Must be a global at this point.
  const GlobalValue *GVal = MO.getGlobal();
  const auto *GV = dyn_cast<GlobalVariable>(GVal);

  // Only private constant arrays with an initializer are candidates for
  // jump-table conversion; anything else is a plain address load.
  if (!GV || GV->getLinkage() != GlobalValue::PrivateLinkage ||
      !GV->isConstant() || !GV->hasInitializer())
    return emitLDImm64(GVal);

  const auto *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return emitLDImm64(GVal);

  // Every element must be a block address; collect the corresponding
  // machine blocks as jump-table targets, or bail out to a plain load.
  for (const Use &Op : CA->operands()) {
    if (!isa<BlockAddress>(Op))
      return emitLDImm64(GVal);
    auto *BA = cast<BlockAddress>(Op);
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);
    Targets.push_back(TgtMBB);
  }

  JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
            ->createJumpTableIndex(Targets);
  return emitLDImm64(nullptr, JTI);
}
1022
1025 MachineBasicBlock *BB) const {
1027 DebugLoc DL = MI.getDebugLoc();
1028 unsigned Opc = MI.getOpcode();
1029 bool isSelectRROp = (Opc == BPF::Select ||
1030 Opc == BPF::Select_64_32 ||
1031 Opc == BPF::Select_32 ||
1032 Opc == BPF::Select_32_64);
1033
1034 bool isMemcpyOp = Opc == BPF::MEMCPY;
1035 bool isLDimm64Op = Opc == BPF::LDIMM64;
1036
1037#ifndef NDEBUG
1038 bool isSelectRIOp = (Opc == BPF::Select_Ri ||
1039 Opc == BPF::Select_Ri_64_32 ||
1040 Opc == BPF::Select_Ri_32 ||
1041 Opc == BPF::Select_Ri_32_64);
1042
1043 if (!(isSelectRROp || isSelectRIOp || isMemcpyOp || isLDimm64Op))
1044 report_fatal_error("unhandled instruction type: " + Twine(Opc));
1045#endif
1046
1047 if (isMemcpyOp)
1048 return EmitInstrWithCustomInserterMemcpy(MI, BB);
1049
1050 if (isLDimm64Op)
1051 return EmitInstrWithCustomInserterLDimm64(MI, BB);
1052
1053 bool is32BitCmp = (Opc == BPF::Select_32 ||
1054 Opc == BPF::Select_32_64 ||
1055 Opc == BPF::Select_Ri_32 ||
1056 Opc == BPF::Select_Ri_32_64);
1057
1058 // To "insert" a SELECT instruction, we actually have to insert the diamond
1059 // control-flow pattern. The incoming instruction knows the destination vreg
1060 // to set, the condition code register to branch on, the true/false values to
1061 // select between, and a branch opcode to use.
1062 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1064
1065 // ThisMBB:
1066 // ...
1067 // TrueVal = ...
1068 // jmp_XX r1, r2 goto Copy1MBB
1069 // fallthrough --> Copy0MBB
1070 MachineBasicBlock *ThisMBB = BB;
1071 MachineFunction *F = BB->getParent();
1072 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1073 MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
1074
1075 F->insert(I, Copy0MBB);
1076 F->insert(I, Copy1MBB);
1077 // Update machine-CFG edges by transferring all successors of the current
1078 // block to the new block which will contain the Phi node for the select.
1079 Copy1MBB->splice(Copy1MBB->begin(), BB,
1080 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1081 Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
1082 // Next, add the true and fallthrough blocks as its successors.
1083 BB->addSuccessor(Copy0MBB);
1084 BB->addSuccessor(Copy1MBB);
1085
1086 // Insert Branch if Flag
1087 int CC = MI.getOperand(3).getImm();
1088 int NewCC;
1089 switch (CC) {
1090#define SET_NEWCC(X, Y) \
1091 case ISD::X: \
1092 if (is32BitCmp && HasJmp32) \
1093 NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
1094 else \
1095 NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
1096 break
1097 SET_NEWCC(SETGT, JSGT);
1098 SET_NEWCC(SETUGT, JUGT);
1099 SET_NEWCC(SETGE, JSGE);
1100 SET_NEWCC(SETUGE, JUGE);
1101 SET_NEWCC(SETEQ, JEQ);
1102 SET_NEWCC(SETNE, JNE);
1103 SET_NEWCC(SETLT, JSLT);
1104 SET_NEWCC(SETULT, JULT);
1105 SET_NEWCC(SETLE, JSLE);
1106 SET_NEWCC(SETULE, JULE);
1107 default:
1108 report_fatal_error("unimplemented select CondCode " + Twine(CC));
1109 }
1110
1111 Register LHS = MI.getOperand(1).getReg();
1112 bool isSignedCmp = (CC == ISD::SETGT ||
1113 CC == ISD::SETGE ||
1114 CC == ISD::SETLT ||
1115 CC == ISD::SETLE);
1116
1117 // eBPF at the moment only has 64-bit comparison. Any 32-bit comparison need
1118 // to be promoted, however if the 32-bit comparison operands are destination
1119 // registers then they are implicitly zero-extended already, there is no
1120 // need of explicit zero-extend sequence for them.
1121 //
1122 // We simply do extension for all situations in this method, but we will
1123 // try to remove those unnecessary in BPFMIPeephole pass.
1124 if (is32BitCmp && !HasJmp32)
1125 LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);
1126
1127 if (isSelectRROp) {
1128 Register RHS = MI.getOperand(2).getReg();
1129
1130 if (is32BitCmp && !HasJmp32)
1131 RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);
1132
1133 BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
1134 } else {
1135 int64_t imm32 = MI.getOperand(2).getImm();
1136 // Check before we build J*_ri instruction.
1137 if (!isInt<32>(imm32))
1138 report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
1139 BuildMI(BB, DL, TII.get(NewCC))
1140 .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
1141 }
1142
1143 // Copy0MBB:
1144 // %FalseValue = ...
1145 // # fallthrough to Copy1MBB
1146 BB = Copy0MBB;
1147
1148 // Update machine-CFG edges
1149 BB->addSuccessor(Copy1MBB);
1150
1151 // Copy1MBB:
1152 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
1153 // ...
1154 BB = Copy1MBB;
1155 BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
1156 .addReg(MI.getOperand(5).getReg())
1157 .addMBB(Copy0MBB)
1158 .addReg(MI.getOperand(4).getReg())
1159 .addMBB(ThisMBB);
1160
1161 MI.eraseFromParent(); // The pseudo instruction is gone now.
1162 return BB;
1163}
1164
1166 EVT VT) const {
1167 return getHasAlu32() ? MVT::i32 : MVT::i64;
1168}
1169
1171 EVT VT) const {
1172 return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
1173}
1174
1175bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1176 const AddrMode &AM, Type *Ty,
1177 unsigned AS,
1178 Instruction *I) const {
1179 // No global is ever allowed as a base.
1180 if (AM.BaseGV)
1181 return false;
1182
1183 switch (AM.Scale) {
1184 case 0: // "r+i" or just "i", depending on HasBaseReg.
1185 break;
1186 case 1:
1187 if (!AM.HasBaseReg) // allow "r+i".
1188 break;
1189 return false; // disallow "r+r" or "r+r+i".
1190 default:
1191 return false;
1192 }
1193
1194 return true;
1195}
1196
1197bool BPFTargetLowering::CanLowerReturn(
1198 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
1200 const Type *RetTy) const {
1202 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1203 return CCInfo.CheckReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
1204}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static uint32_t * regMaskFromTemplate(const TargetRegisterInfo *TRI, MachineFunction &MF, const uint32_t *BaseRegMask)
static Function * createBPFUnreachable(Module *M)
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static cl::opt< bool > BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order", cl::Hidden, cl::init(false), cl::desc("Expand memcpy into load/store pairs in order"))
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
static cl::opt< unsigned > BPFMinimumJumpTableEntries("bpf-min-jump-table-entries", cl::init(13), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table on BPF"))
static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask, MCRegister Reg)
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)
#define SET_NEWCC(X, Y)
#define BPF_TRAP
Definition BPF.h:25
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
Value * RHS
Value * LHS
unsigned getCommonMaxStoresPerMemFunc() const
bool hasSdivSmod() const
bool getAllowsMisalignedMemAccess() const
bool getHasJmpExt() const
const BPFSelectionDAGInfo * getSelectionDAGInfo() const override
bool hasLdsx() const
bool hasGotox() const
bool hasMovsx() const
bool getHasJmp32() const
const BPFRegisterInfo * getRegisterInfo() const override
bool getHasAlu32() const
BPFTargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *) const override
Determine if the target supports unaligned memory accesses.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
BPFTargetLowering(const TargetMachine &TM, const BPFSubtarget &STI)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BasicBlock * getBasicBlock() const
Definition Constants.h:1100
CCState - This class holds information needed while lowering arguments and return values.
Register getLocReg() const
LocInfo getLocInfo() const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
LLVM_ABI DISubroutineType * createSubroutineType(DITypeArray ParameterTypes, DINode::DIFlags Flags=DINode::FlagZero, unsigned CC=0)
Create subroutine type.
LLVM_ABI DISubprogram * createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine, DINode::DIFlags Flags=DINode::FlagZero, DISubprogram::DISPFlags SPFlags=DISubprogram::SPFlagZero, DITemplateParameterArray TParams=nullptr, DISubprogram *Decl=nullptr, DITypeArray ThrownTypes=nullptr, DINodeArray Annotations=nullptr, StringRef TargetFuncName="", bool UseKeyInstructions=false)
Create a new descriptor for the specified subprogram.
LLVM_ABI DITypeArray getOrCreateTypeArray(ArrayRef< Metadata * > Elements)
Get a DITypeArray, create one if required.
Subprogram description. Uses SubclassData1.
Type array for a subprogram.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void setCallingConv(CallingConv::ID CC)
Definition Function.h:276
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:284
LinkageTypes getLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool hasAddressTaken() const
Test whether this block is used as something other than the target of a terminator,...
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags
Flags values. These may be or'd together.
const GlobalValue * getGlobal() const
const BlockAddress * getBlockAddress() const
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const SDValue & getOperand(unsigned Num) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
self_iterator getIterator()
Definition ilist_node.h:123
A raw_ostream that writes to an std::string.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:788
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:779
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:853
@ GlobalAddress
Definition ISDOpcodes.h:88
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:787
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:796
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:811
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:888
@ TRAP
TRAP - Trapping instruction.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:833
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ Dead
Unused definition.
@ EarlyClobber
Register definition happens before uses.
@ Define
Register definition.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1152
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
void print(raw_ostream &OS) const
Implement operator<<.
Definition ValueTypes.h:512
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs