LLVM 22.0.0git
BPFISelLowering.cpp
Go to the documentation of this file.
1//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that BPF uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "BPFISelLowering.h"
15#include "BPF.h"
16#include "BPFSubtarget.h"
25#include "llvm/IR/DIBuilder.h"
28#include "llvm/IR/Module.h"
29#include "llvm/Support/Debug.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE "bpf-lower"
37
// Command-line knob: when set, memcpy is not expanded early by generic DAG
// code; the constructor below zeroes MaxStoresPerMem* so the target expander
// emits the load/store pairs in order (see the long comment in the ctor).
38static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
39 cl::Hidden, cl::init(false),
40 cl::desc("Expand memcpy into load/store pairs in order"));
41
// NOTE(review): the first line of this option's declaration (original line
// 42, with the option's type and name) is missing from this listing; per the
// description it sets the minimum entry count for emitting a jump table.
43 "bpf-min-jump-table-entries", cl::init(13), cl::Hidden,
44 cl::desc("Set minimum number of entries to use a jump table on BPF"));
45
// Emit an "unsupported feature" diagnostic for the current function.
// If Val is non-null it is printed and prepended to Msg.
// NOTE(review): original lines 54-55 (the DAG.getContext()->diagnose(
// DiagnosticInfoUnsupported(...) call that consumes Str/Msg) are missing
// from this listing.
46static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
47 SDValue Val = {}) {
48 std::string Str;
49 if (Val) {
50 raw_string_ostream OS(Str);
51 Val->print(OS);
52 OS << ' ';
53 }
56 MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
57}
58
// BPFTargetLowering constructor: registers the i64 (and, with ALU32, i32)
// register classes, marks which generic DAG operations need Custom/Expand/
// Promote handling for the selected subtarget features, and caches the
// feature flags used by the rest of this file.
// NOTE(review): this listing has gaps — the first signature line (original
// line 59) and a number of setOperationAction / setLoadExtAction /
// MaxStoresPerMem* lines are absent, so not every configured action is
// visible here.
60 const BPFSubtarget &STI)
61 : TargetLowering(TM, STI) {
62
63 // Set up the register classes.
64 addRegisterClass(MVT::i64, &BPF::GPRRegClass);
65 if (STI.getHasAlu32())
66 addRegisterClass(MVT::i32, &BPF::GPR32RegClass);
67
68 // Compute derived properties from the register classes
70
72
73 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
74 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
75 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
76
// Indirect branches are only selectable when the subtarget has gotox.
77 if (!STI.hasGotox())
78 setOperationAction(ISD::BRIND, MVT::Other, Expand);
79
80 setOperationAction(ISD::TRAP, MVT::Other, Custom);
81
83 if (STI.hasGotox())
85
86 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
87 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
88 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
89
90 // Set unsupported atomic operations as Custom so
91 // we can emit better error messages than fatal error
92 // from selectiondag.
93 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
94 if (VT == MVT::i32) {
95 if (STI.getHasAlu32())
96 continue;
97 } else {
98 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
99 }
100
101 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
102 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
103 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
104 setOperationAction(ISD::ATOMIC_SWAP, VT, Custom);
105 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
106 }
107
// Custom-lowered so seq_cst orderings can be diagnosed (LowerATOMIC_LOAD_STORE).
108 for (auto VT : {MVT::i32, MVT::i64}) {
109 setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
110 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
111 }
112
113 for (auto VT : { MVT::i32, MVT::i64 }) {
114 if (VT == MVT::i32 && !STI.getHasAlu32())
115 continue;
116
119 if (!STI.hasSdivSmod()) {
122 }
137
141 }
142
143 if (STI.getHasAlu32()) {
145 setOperationAction(ISD::BR_CC, MVT::i32,
146 STI.getHasJmp32() ? Custom : Promote);
147 }
148
150 if (!STI.hasMovsx()) {
154 }
155
156 // Extended load operations for i1 types must be promoted
157 for (MVT VT : MVT::integer_valuetypes()) {
161
162 if (!STI.hasLdsx()) {
164 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
165 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
166 }
167 }
168
172
173 // Function alignments
176
178 // LLVM generic code will try to expand memcpy into load/store pairs at this
179 // stage which is before quite a few IR optimization passes, therefore the
180 // loads and stores could potentially be moved apart from each other which
181 // will cause trouble to memcpy pattern matcher inside kernel eBPF JIT
182 // compilers.
183 //
184 // When -bpf-expand-memcpy-in-order specified, we want to defer the expand
185 // of memcpy to later stage in IR optimization pipeline so those load/store
186 // pairs won't be touched and could be kept in order. Hence, we set
187 // MaxStoresPerMem* to zero to disable the generic getMemcpyLoadsAndStores
188 // code path, and ask LLVM to use target expander EmitTargetCodeForMemcpy.
193 } else {
194 // inline memcpy() for kernel to see explicit copy
195 unsigned CommonMaxStores =
197
202 }
203
204 // CPU/Feature control
205 HasAlu32 = STI.getHasAlu32();
206 HasJmp32 = STI.getHasJmp32();
207 HasJmpExt = STI.getHasJmpExt();
208 HasMovsx = STI.hasMovsx();
209
210 AllowsMisalignedMemAccess = STI.getAllowsMisalignedMemAccess();
211 AllowBuiltinCalls = STI.getAllowBuiltinCalls();
212}
213
// NOTE(review): the first signature lines (original 214-215, the function
// name and the EVT/AddressSpace/Alignment/Flags parameters) are missing from
// this listing. Body: misaligned accesses are permitted only when the
// subtarget flag is set and the type is a simple MVT; they are then always
// reported as "fast".
216 unsigned *Fast) const {
217 // allows-misaligned-mem-access is disabled
218 if (!AllowsMisalignedMemAccess)
219 return false;
220
221 // only allow misalignment for simple value types
222 if (!VT.isSimple())
223 return false;
224
225 // always assume fast mode when misalignment is allowed
226 if (Fast)
227 *Fast = true;
228
229 return true;
230}
231
// NOTE(review): the signature (original line 232) of this predicate is
// missing from this listing; only its unconditional `return false;` body is
// visible. Do not infer its identity from position alone — confirm against
// the upstream file.
233 return false;
234}
235
236bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
237 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
238 return false;
239 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
240 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
241 return NumBits1 > NumBits2;
242}
243
244bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
245 if (!VT1.isInteger() || !VT2.isInteger())
246 return false;
247 unsigned NumBits1 = VT1.getSizeInBits();
248 unsigned NumBits2 = VT2.getSizeInBits();
249 return NumBits1 > NumBits2;
250}
251
252bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
253 if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
254 return false;
255 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
256 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
257 return NumBits1 == 32 && NumBits2 == 64;
258}
259
260bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
261 if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
262 return false;
263 unsigned NumBits1 = VT1.getSizeInBits();
264 unsigned NumBits2 = VT2.getSizeInBits();
265 return NumBits1 == 32 && NumBits2 == 64;
266}
267
268bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
269 EVT VT1 = Val.getValueType();
270 if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
271 MVT MT1 = VT1.getSimpleVT().SimpleTy;
272 MVT MT2 = VT2.getSimpleVT().SimpleTy;
273 if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
274 (MT2 == MVT::i32 || MT2 == MVT::i64))
275 return true;
276 }
277 return TargetLoweringBase::isZExtFree(Val, VT2);
278}
279
283
// getConstraintType: the single-letter inline-asm constraint 'w' selects a
// register-class constraint; anything else defers to the base class.
// NOTE(review): the signature lines (original 284-285) are missing from this
// listing.
286 if (Constraint.size() == 1) {
287 switch (Constraint[0]) {
288 default:
289 break;
290 case 'w':
291 return C_RegisterClass;
292 }
293 }
294
295 return TargetLowering::getConstraintType(Constraint);
296}
297
// Map inline-asm constraint letters to register classes: 'r' -> 64-bit GPRs,
// 'w' -> 32-bit GPRs (only when ALU32 is enabled).
// NOTE(review): original line 299 (the function name and TRI parameter) and
// line 316 (the fall-through call to
// TargetLowering::getRegForInlineAsmConstraint) are missing from this
// listing.
298std::pair<unsigned, const TargetRegisterClass *>
300 StringRef Constraint,
301 MVT VT) const {
302 if (Constraint.size() == 1) {
303 // GCC Constraint Letters
304 switch (Constraint[0]) {
305 case 'r': // GENERAL_REGS
306 return std::make_pair(0U, &BPF::GPRRegClass);
307 case 'w':
308 if (HasAlu32)
309 return std::make_pair(0U, &BPF::GPR32RegClass);
310 break;
311 default:
312 break;
313 }
314 }
315
317}
318
// Custom result legalization: for atomic RMW/cmpxchg nodes that the target
// cannot select, emit a user-friendly diagnostic via fail() before the
// generic legalizer's fatal error fires. ATOMIC_LOAD/STORE are silently
// accepted here (they are handled in LowerATOMIC_LOAD_STORE).
// NOTE(review): original line 320 (the SDNode*/Results/DAG parameter list)
// is missing from this listing.
319void BPFTargetLowering::ReplaceNodeResults(
321 const char *Msg;
322 uint32_t Opcode = N->getOpcode();
323 switch (Opcode) {
324 default:
325 report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
326 case ISD::ATOMIC_LOAD_ADD:
327 case ISD::ATOMIC_LOAD_AND:
328 case ISD::ATOMIC_LOAD_OR:
329 case ISD::ATOMIC_LOAD_XOR:
330 case ISD::ATOMIC_SWAP:
331 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
332 if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
333 Msg = "unsupported atomic operation, please use 32/64 bit version";
334 else
335 Msg = "unsupported atomic operation, please use 64 bit version";
336 break;
337 case ISD::ATOMIC_LOAD:
338 case ISD::ATOMIC_STORE:
339 return;
340 }
341
342 SDLoc DL(N);
343 // We'll still produce a fatal error downstream, but this diagnostic is more
344 // user-friendly.
345 fail(DL, DAG, Msg);
346}
347
// LowerOperation: dispatch table for every opcode this target marked Custom;
// unknown opcodes are a hard error.
// NOTE(review): the signature (original line 348) and the case labels for
// GlobalAddress / ConstantPool / BlockAddress (original lines 356, 358, 360)
// are missing from this listing — only their return statements are visible.
349 switch (Op.getOpcode()) {
350 default:
351 report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
352 case ISD::BR_CC:
353 return LowerBR_CC(Op, DAG);
354 case ISD::JumpTable:
355 return LowerJumpTable(Op, DAG);
357 return LowerGlobalAddress(Op, DAG);
359 return LowerConstantPool(Op, DAG);
361 return LowerBlockAddress(Op, DAG);
362 case ISD::SELECT_CC:
363 return LowerSELECT_CC(Op, DAG);
364 case ISD::SDIV:
365 case ISD::SREM:
366 return LowerSDIVSREM(Op, DAG);
367 case ISD::DYNAMIC_STACKALLOC:
368 return LowerDYNAMIC_STACKALLOC(Op, DAG);
369 case ISD::ATOMIC_LOAD:
370 case ISD::ATOMIC_STORE:
371 return LowerATOMIC_LOAD_STORE(Op, DAG);
372 case ISD::TRAP:
373 return LowerTRAP(Op, DAG);
374 }
375}
376
377// Calling Convention Implementation
378#include "BPFGenCallingConv.inc"
379
// Lower the incoming arguments of a function: i32/i64 register arguments are
// copied out of their physregs into fresh vregs (with Assert[SZ]ext +
// truncate when the value was promoted). Memory-located arguments are not
// supported — a diagnostic is emitted and a zero placeholder is pushed so
// lowering can continue. Variadic and sret functions are likewise diagnosed.
// NOTE(review): this listing has gaps — original line 388 (a second accepted
// CallingConv case), lines 392-393 (MF/RegInfo declarations) and line 396
// (the ArgLocs SmallVector declaration) are missing.
380SDValue BPFTargetLowering::LowerFormalArguments(
381 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
382 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
383 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
384 switch (CallConv) {
385 default:
386 report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
387 case CallingConv::C:
389 break;
390 }
391
394
395 // Assign locations to all of the incoming arguments.
397 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
398 CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);
399
400 bool HasMemArgs = false;
401 for (size_t I = 0; I < ArgLocs.size(); ++I) {
402 auto &VA = ArgLocs[I];
403
404 if (VA.isRegLoc()) {
405 // Arguments passed in registers
406 EVT RegVT = VA.getLocVT();
407 MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
408 switch (SimpleTy) {
409 default: {
410 std::string Str;
411 {
412 raw_string_ostream OS(Str);
413 RegVT.print(OS);
414 }
415 report_fatal_error("unhandled argument type: " + Twine(Str));
416 }
417 case MVT::i32:
418 case MVT::i64:
419 Register VReg = RegInfo.createVirtualRegister(
420 SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
421 RegInfo.addLiveIn(VA.getLocReg(), VReg);
422 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
423
424 // If this is an value that has been promoted to wider types, insert an
425 // assert[sz]ext to capture this, then truncate to the right size.
426 if (VA.getLocInfo() == CCValAssign::SExt)
427 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
428 DAG.getValueType(VA.getValVT()));
429 else if (VA.getLocInfo() == CCValAssign::ZExt)
430 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
431 DAG.getValueType(VA.getValVT()));
432
433 if (VA.getLocInfo() != CCValAssign::Full)
434 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
435
436 InVals.push_back(ArgValue);
437
438 break;
439 }
440 } else {
441 if (VA.isMemLoc())
442 HasMemArgs = true;
443 else
444 report_fatal_error("unhandled argument location");
445 InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
446 }
447 }
448 if (HasMemArgs)
449 fail(DL, DAG, "stack arguments are not supported");
450 if (IsVarArg)
451 fail(DL, DAG, "variadic functions are not supported");
452 if (MF.getFunction().hasStructRetAttr())
453 fail(DL, DAG, "aggregate returns are not supported");
454
455 return Chain;
456}
457
// Upper bound on register-passed call arguments; LowerCall diagnoses calls
// with more outgoing values than this.
458const size_t BPFTargetLowering::MaxArgs = 5;
459
460static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask,
461 MCRegister Reg) {
462 for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
463 RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
464}
465
// Allocate a register mask owned by MF and copy BaseRegMask into it, so the
// caller can clear individual bits without mutating the shared template mask.
// NOTE(review): the first signature line (original line 466, return type,
// name and TRI parameter) is missing from this listing.
467 MachineFunction &MF,
468 const uint32_t *BaseRegMask) {
469 uint32_t *RegMask = MF.allocateRegMask();
470 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
471 memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);
472 return RegMask;
473}
474
// Lower an outgoing call: analyze operands against CC_BPF32/CC_BPF64, copy
// up to MaxArgs register arguments into physregs glued together, resolve the
// callee to a Target{GlobalAddress,ExternalSymbol}, attach the appropriate
// call-preserved register mask (a narrowed PreserveAll mask for
// "bpf_fastcall" call sites), then emit BPFISD::CALL bracketed by
// CALLSEQ_START/END and hand results to LowerCallResult. Tail calls, byval,
// and stack arguments are diagnosed or rejected.
// NOTE(review): this listing has gaps — original line 494 (a second accepted
// CallingConv case), line 500 (the ArgLocs SmallVector declaration), lines
// 532/534/537/540 (the CCValAssign::Full/SExt/ZExt/AExt case labels) and
// line 580 (the Ops SmallVector declaration) are missing.
475SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
476 SmallVectorImpl<SDValue> &InVals) const {
477 SelectionDAG &DAG = CLI.DAG;
478 auto &Outs = CLI.Outs;
479 auto &OutVals = CLI.OutVals;
480 auto &Ins = CLI.Ins;
481 SDValue Chain = CLI.Chain;
482 SDValue Callee = CLI.Callee;
483 bool &IsTailCall = CLI.IsTailCall;
484 CallingConv::ID CallConv = CLI.CallConv;
485 bool IsVarArg = CLI.IsVarArg;
486 MachineFunction &MF = DAG.getMachineFunction();
487
488 // BPF target does not support tail call optimization.
489 IsTailCall = false;
490
491 switch (CallConv) {
492 default:
493 report_fatal_error("unsupported calling convention: " + Twine(CallConv));
495 case CallingConv::C:
496 break;
497 }
498
499 // Analyze operands of the call, assigning locations to each operand.
501 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
502
503 CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);
504
505 unsigned NumBytes = CCInfo.getStackSize();
506
507 if (Outs.size() > MaxArgs)
508 fail(CLI.DL, DAG, "too many arguments", Callee);
509
510 for (auto &Arg : Outs) {
511 ISD::ArgFlagsTy Flags = Arg.Flags;
512 if (!Flags.isByVal())
513 continue;
514 fail(CLI.DL, DAG, "pass by value not supported", Callee);
515 break;
516 }
517
518 auto PtrVT = getPointerTy(MF.getDataLayout());
519 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
520
521 SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;
522
523 // Walk arg assignments
524 for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {
525 CCValAssign &VA = ArgLocs[i];
526 SDValue &Arg = OutVals[i];
527
528 // Promote the value if needed.
529 switch (VA.getLocInfo()) {
530 default:
531 report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
533 break;
535 Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
536 break;
538 Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
539 break;
541 Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
542 break;
543 }
544
545 // Push arguments into RegsToPass vector
546 if (VA.isRegLoc())
547 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
548 else
549 report_fatal_error("stack arguments are not supported");
550 }
551
552 SDValue InGlue;
553
554 // Build a sequence of copy-to-reg nodes chained together with token chain and
555 // flag operands which copy the outgoing args into registers. The InGlue in
556 // necessary since all emitted instructions must be stuck together.
557 for (auto &Reg : RegsToPass) {
558 Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
559 InGlue = Chain.getValue(1);
560 }
561
562 // If the callee is a GlobalAddress node (quite common, every direct call is)
563 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
564 // Likewise ExternalSymbol -> TargetExternalSymbol.
565 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
566 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
567 G->getOffset(), 0);
568 } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
569 if (StringRef(E->getSymbol()) != BPF_TRAP) {
570 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
571 if (!AllowBuiltinCalls)
572 fail(CLI.DL, DAG,
573 Twine("A call to built-in function '" + StringRef(E->getSymbol()) +
574 "' is not supported."));
575 }
576 }
577
578 // Returns a chain & a flag for retval copy to use.
579 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
581 Ops.push_back(Chain);
582 Ops.push_back(Callee);
583
584 // Add argument registers to the end of the list so that they are
585 // known live into the call.
586 for (auto &Reg : RegsToPass)
587 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
588
589 bool HasFastCall =
590 (CLI.CB && isa<CallInst>(CLI.CB) && CLI.CB->hasFnAttr("bpf_fastcall"));
591 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
592 if (HasFastCall) {
// bpf_fastcall: start from the PreserveAll mask and clear only the argument
// registers (and R0 when a value is returned), so everything else is
// treated as preserved across the call.
593 uint32_t *RegMask = regMaskFromTemplate(
594 TRI, MF, TRI->getCallPreservedMask(MF, CallingConv::PreserveAll));
595 for (auto const &RegPair : RegsToPass)
596 resetRegMaskBit(TRI, RegMask, RegPair.first);
597 if (!CLI.CB->getType()->isVoidTy())
598 resetRegMaskBit(TRI, RegMask, BPF::R0);
599 Ops.push_back(DAG.getRegisterMask(RegMask));
600 } else {
601 Ops.push_back(
602 DAG.getRegisterMask(TRI->getCallPreservedMask(MF, CLI.CallConv)));
603 }
604
605 if (InGlue.getNode())
606 Ops.push_back(InGlue);
607
608 Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
609 InGlue = Chain.getValue(1);
610
611 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
612
613 // Create the CALLSEQ_END node.
614 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
615 InGlue = Chain.getValue(1);
616
617 // Handle result values, copying them out of physregs into vregs that we
618 // return.
619 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
620 InVals);
621}
622
// Lower a function return: copy each return value into its assigned physreg
// (glued so the copies stay together) and emit BPFISD::RET_GLUE. Stack-based
// return locations are a hard error; an sret-style aggregate return gets a
// diagnostic and a bare return.
// NOTE(review): this listing has gaps — original line 623 (the `SDValue`
// return-type line), line 626 (the Outs parameter), line 632 (the RVLocs
// SmallVector declaration) and line 638 (the guard condition preceding the
// "aggregate returns" diagnostic) are missing.
624BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
625 bool IsVarArg,
627 const SmallVectorImpl<SDValue> &OutVals,
628 const SDLoc &DL, SelectionDAG &DAG) const {
629 unsigned Opc = BPFISD::RET_GLUE;
630
631 // CCValAssign - represent the assignment of the return value to a location
633 MachineFunction &MF = DAG.getMachineFunction();
634
635 // CCState - Info about the registers and stack slot.
636 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
637
639 fail(DL, DAG, "aggregate returns are not supported");
640 return DAG.getNode(Opc, DL, MVT::Other, Chain);
641 }
642
643 // Analize return values.
644 CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
645
646 SDValue Glue;
647 SmallVector<SDValue, 4> RetOps(1, Chain);
648
649 // Copy the result values into the output registers.
650 for (size_t i = 0; i != RVLocs.size(); ++i) {
651 CCValAssign &VA = RVLocs[i];
652 if (!VA.isRegLoc())
653 report_fatal_error("stack return values are not supported");
654
655 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);
656
657 // Guarantee that all emitted copies are stuck together,
658 // avoiding something bad.
659 Glue = Chain.getValue(1);
660 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
661 }
662
663 RetOps[0] = Chain; // Update chain.
664
665 // Add the glue if we have it.
666 if (Glue.getNode())
667 RetOps.push_back(Glue);
668
669 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
670}
671
// Copy call results out of their physregs into InVals. Calls returning more
// than one value get a diagnostic plus zero placeholders so lowering can
// proceed.
// NOTE(review): original line 679 (the RVLocs SmallVector declaration) is
// missing from this listing.
672SDValue BPFTargetLowering::LowerCallResult(
673 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
674 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
675 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
676
677 MachineFunction &MF = DAG.getMachineFunction();
678 // Assign locations to each value returned by this call.
680 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
681
682 if (Ins.size() > 1) {
683 fail(DL, DAG, "only small returns supported");
684 for (auto &In : Ins)
685 InVals.push_back(DAG.getConstant(0, DL, In.VT));
// The hard-coded register number 1 here only keeps the DAG well-formed on
// this already-diagnosed error path.
686 return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InGlue).getValue(1);
687 }
688
689 CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
690
691 // Copy all of the result registers out of their specified physreg.
692 for (auto &Val : RVLocs) {
693 Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
694 Val.getValVT(), InGlue).getValue(1);
695 InGlue = Chain.getValue(2);
696 InVals.push_back(Chain.getValue(0));
697 }
698
699 return Chain;
700}
701
// For the less-than family of condition codes the operands are swapped
// (used when the extended-jump ISA is unavailable; see LowerBR_CC and
// LowerSELECT_CC).
// NOTE(review): the signature (original line 702) and line 710 — the
// statement that rewrites CC itself before the operand swap — are missing
// from this listing.
703 switch (CC) {
704 default:
705 break;
706 case ISD::SETULT:
707 case ISD::SETULE:
708 case ISD::SETLT:
709 case ISD::SETLE:
711 std::swap(LHS, RHS);
712 break;
713 }
714}
715
716SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
717 SDLoc DL(Op);
718 fail(DL, DAG,
719 "unsupported signed division, please convert to unsigned div/mod.");
720 return DAG.getUNDEF(Op->getValueType(0));
721}
722
723SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
724 SelectionDAG &DAG) const {
725 SDLoc DL(Op);
726 fail(DL, DAG, "unsupported dynamic stack allocation");
727 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
728 return DAG.getMergeValues(Ops, SDLoc());
729}
730
731SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
732 SDValue Chain = Op.getOperand(0);
733 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
734 SDValue LHS = Op.getOperand(2);
735 SDValue RHS = Op.getOperand(3);
736 SDValue Dest = Op.getOperand(4);
737 SDLoc DL(Op);
738
739 if (!getHasJmpExt())
740 NegateCC(LHS, RHS, CC);
741
742 return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
743 DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
744}
745
746SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
747 SDValue LHS = Op.getOperand(0);
748 SDValue RHS = Op.getOperand(1);
749 SDValue TrueV = Op.getOperand(2);
750 SDValue FalseV = Op.getOperand(3);
751 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
752 SDLoc DL(Op);
753
754 if (!getHasJmpExt())
755 NegateCC(LHS, RHS, CC);
756
757 SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
758 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
759
760 return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);
761}
762
// Atomic load/store nodes are accepted as-is, but a seq_cst ordering gets a
// diagnostic since the target cannot honor it.
// NOTE(review): original line 769 (the right-hand side of the ordering
// comparison, presumably AtomicOrdering::SequentiallyConsistent given the
// message) is missing from this listing.
763SDValue BPFTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
764 SelectionDAG &DAG) const {
765 SDNode *N = Op.getNode();
766 SDLoc DL(N);
767
768 if (cast<AtomicSDNode>(N)->getMergedOrdering() ==
770 fail(DL, DAG,
771 "sequentially consistent (seq_cst) "
772 "atomic load/store is not supported");
773
774 return Op;
775}
776
// Get-or-create the module's BPF_TRAP function: a void() symbol placed in
// the ".ksyms" section, with a minimal DISubprogram attached when the module
// carries debug info so the BTF/debug pipeline can describe it.
// NOTE(review): this listing is missing the function signature (original
// line 777), line 783 (the Function::Create(...) call that initializes NewF)
// and line 785 (an additional attribute/visibility setting on NewF).
778 if (auto *Fn = M->getFunction(BPF_TRAP))
779 return Fn;
780
781 FunctionType *FT = FunctionType::get(Type::getVoidTy(M->getContext()), false);
782 Function *NewF =
784 NewF->setDSOLocal(true);
786 NewF->setSection(".ksyms");
787
788 if (M->debug_compile_units().empty())
789 return NewF;
790
791 DIBuilder DBuilder(*M);
792 DITypeRefArray ParamTypes =
793 DBuilder.getOrCreateTypeArray({nullptr /*void return*/});
794 DISubroutineType *FuncType = DBuilder.createSubroutineType(ParamTypes);
795 DICompileUnit *CU = *M->debug_compile_units_begin();
796 DISubprogram *SP =
797 DBuilder.createFunction(CU, BPF_TRAP, BPF_TRAP, nullptr, 0, FuncType, 0,
798 DINode::FlagZero, DISubprogram::SPFlagZero);
799 NewF->setSubprogram(SP);
800 return NewF;
801}
802
// Lower ISD::TRAP into a no-return call to the BPF_TRAP function, reusing
// the normal LowerCall path.
// NOTE(review): this listing is missing original line 806 (the InVals
// SmallVector declaration), line 810 (the statement obtaining Fn — the
// BPF_TRAP Function used as callee below) and line 815 (a CLI field
// assignment, likely CLI.CallConv).
803SDValue BPFTargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
804 MachineFunction &MF = DAG.getMachineFunction();
805 TargetLowering::CallLoweringInfo CLI(DAG);
807 SDNode *N = Op.getNode();
808 SDLoc DL(N);
809
811 auto PtrVT = getPointerTy(MF.getDataLayout());
812 CLI.Callee = DAG.getTargetGlobalAddress(Fn, DL, PtrVT);
813 CLI.Chain = N->getOperand(0);
814 CLI.IsTailCall = false;
816 CLI.IsVarArg = false;
817 CLI.DL = DL;
818 CLI.NoMerge = false;
819 CLI.DoesNotReturn = true;
820 return LowerCall(CLI, InVals);
821}
822
823SDValue BPFTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
824 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
825 return getAddr(N, DAG);
826}
827
// getTargetNode overload for constant-pool nodes: forwards the pool entry's
// constant, alignment and offset into a TargetConstantPool node.
// NOTE(review): the first signature line (original line 828, with the
// ConstantPoolSDNode/SDLoc/EVT parameters) is missing from this listing.
829 SelectionDAG &DAG, unsigned Flags) {
830 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
831 N->getOffset(), Flags);
832}
833
// getTargetNode overload for jump-table nodes: wraps the table index in a
// TargetJumpTable node.
// NOTE(review): the first signature line (original line 834, with the
// JumpTableSDNode/SDLoc/EVT parameters) is missing from this listing.
835 SelectionDAG &DAG, unsigned Flags) {
836 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}
838
839template <class NodeTy>
840SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
841 unsigned Flags) const {
842 SDLoc DL(N);
843
844 SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);
845
846 return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
847}
848
849SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
850 SelectionDAG &DAG) const {
851 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
852 if (N->getOffset() != 0)
853 report_fatal_error("invalid offset for global address: " +
854 Twine(N->getOffset()));
855
856 const GlobalValue *GVal = N->getGlobal();
857 SDLoc DL(Op);
858
859 // Wrap it in a TargetGlobalAddress
860 SDValue Addr = DAG.getTargetGlobalAddress(GVal, DL, MVT::i64);
861
862 // Emit pseudo instruction
863 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
864}
865
866SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
867 SelectionDAG &DAG) const {
868 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
869
870 return getAddr(N, DAG);
871}
872
873SDValue BPFTargetLowering::LowerBlockAddress(SDValue Op,
874 SelectionDAG &DAG) const {
875 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
876 SDLoc DL(Op);
877
878 // Wrap it in a TargetBlockAddress
879 SDValue Addr = DAG.getTargetBlockAddress(BA, MVT::i64);
880
881 // Emit pseudo instruction
882 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
883}
884
885unsigned
886BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
887 unsigned Reg, bool isSigned) const {
888 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
889 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
890 int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
891 MachineFunction *F = BB->getParent();
892 DebugLoc DL = MI.getDebugLoc();
893
894 MachineRegisterInfo &RegInfo = F->getRegInfo();
895
896 if (!isSigned) {
897 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
898 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
899 return PromotedReg0;
900 }
901 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
902 Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
903 Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
904 if (HasMovsx) {
905 BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg0).addReg(Reg);
906 } else {
907 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
908 BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
909 .addReg(PromotedReg0).addImm(32);
910 BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
911 .addReg(PromotedReg1).addImm(32);
912 }
913
914 return PromotedReg2;
915}
916
// Custom insertion for BPFISD::MEMCPY: append a scratch virtual register
// operand that the later expansion uses as the load destination / store
// source (see the block comment below for the operand flags rationale).
// NOTE(review): this listing is missing the return-type/parameter lines
// (original 917 and 919) and line 943 — the RegState flag combination
// (Define | Dead | EarlyClobber, per the comment) passed to MIB.addReg.
918BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
920 const {
921 MachineFunction *MF = MI.getParent()->getParent();
922 MachineRegisterInfo &MRI = MF->getRegInfo();
923 MachineInstrBuilder MIB(*MF, MI);
924 unsigned ScratchReg;
925
926 // This function does custom insertion during lowering BPFISD::MEMCPY which
927 // only has two register operands from memcpy semantics, the copy source
928 // address and the copy destination address.
929 //
930 // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
931 // a third scratch register to serve as the destination register of load and
932 // source register of store.
933 //
934 // The scratch register here is with the Define | Dead | EarlyClobber flags.
935 // The EarlyClobber flag has the semantic property that the operand it is
936 // attached to is clobbered before the rest of the inputs are read. Hence it
937 // must be unique among the operands to the instruction. The Define flag is
938 // needed to coerce the machine verifier that an Undef value isn't a problem
939 // as we anyway is loading memory into it. The Dead flag is needed as the
940 // value in scratch isn't supposed to be used by any other instruction.
941 ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
942 MIB.addReg(ScratchReg,
944
945 return BB;
946}
947
// Custom insertion for the LDIMM64 pseudo. Its address operand is either a
// BlockAddress or a Global:
//  - BlockAddress: create a one-entry jump table for the target MBB and emit
//    LD_imm64 of the table address followed by an LDD through it.
//  - Private constant GlobalVariable whose initializer is an array made up
//    entirely of BlockAddresses: turn the whole array into a jump table and
//    emit a single LD_imm64 of the table index.
//  - Any other global: plain LD_imm64 of the global address.
948MachineBasicBlock *BPFTargetLowering::EmitInstrWithCustomInserterLDimm64(
949 MachineInstr &MI, MachineBasicBlock *BB) const {
950 MachineFunction *MF = BB->getParent();
951 const BPFInstrInfo *TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
952 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
953 MachineRegisterInfo &RegInfo = MF->getRegInfo();
954 DebugLoc DL = MI.getDebugLoc();
955
956 // Build address taken map for Global Varaibles and BlockAddresses
957 DenseMap<const BasicBlock *, MachineBasicBlock *> AddressTakenBBs;
958 for (MachineBasicBlock &MBB : *MF) {
959 if (const BasicBlock *BB = MBB.getBasicBlock())
960 if (BB->hasAddressTaken())
961 AddressTakenBBs[BB] = &MBB;
962 }
963
964 MachineOperand &MO = MI.getOperand(1);
965 assert(MO.isBlockAddress() || MO.isGlobal());
966
967 MCRegister ResultReg = MI.getOperand(0).getReg();
968 Register TmpReg = RegInfo.createVirtualRegister(RC);
969
970 std::vector<MachineBasicBlock *> Targets;
971 unsigned JTI;
972
973 if (MO.isBlockAddress()) {
974 auto *BA = MO.getBlockAddress();
975 MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
976 assert(TgtMBB);
977
// Single-target jump table: load the table's address, then load the entry.
978 Targets.push_back(TgtMBB);
979 JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
980 ->createJumpTableIndex(Targets);
981
982 BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), TmpReg)
983 .addJumpTableIndex(JTI);
984 BuildMI(*BB, MI, DL, TII->get(BPF::LDD), ResultReg)
985 .addReg(TmpReg)
986 .addImm(0);
987 MI.eraseFromParent();
988 return BB;
989 }
990
991 // Helper: emit LD_imm64 with operand GlobalAddress or JumpTable
992 auto emitLDImm64 = [&](const GlobalValue *GV = nullptr, unsigned JTI = -1) {
993 auto MIB = BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), ResultReg);
994 if (GV)
995 MIB.addGlobalAddress(GV);
996 else
997 MIB.addJumpTableIndex(JTI);
998 MI.eraseFromParent();
999 return BB;
1000 };
1001
1002 // Must be a global at this point
1003 const GlobalValue *GVal = MO.getGlobal();
1004 const auto *GV = dyn_cast<GlobalVariable>(GVal);
1005
// Only private constant arrays with initializers can become jump tables;
// everything else is a plain global-address load.
1006 if (!GV || GV->getLinkage() != GlobalValue::PrivateLinkage ||
1007 !GV->isConstant() || !GV->hasInitializer())
1008 return emitLDImm64(GVal);
1009
1010 const auto *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1011 if (!CA)
1012 return emitLDImm64(GVal);
1013
// Every element must be a BlockAddress with a known machine block, else
// fall back to the plain global-address form.
1014 for (const Use &Op : CA->operands()) {
1015 if (!isa<BlockAddress>(Op))
1016 return emitLDImm64(GVal);
1017 auto *BA = cast<BlockAddress>(Op);
1018 MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
1019 assert(TgtMBB);
1020 Targets.push_back(TgtMBB);
1021 }
1022
1023 JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
1024 ->createJumpTableIndex(Targets);
1025 return emitLDImm64(nullptr, JTI);
1026}
1027
// Custom inserter for the BPF pseudo instructions Select*, MEMCPY and
// LDIMM64. Select pseudos are expanded into an explicit compare-and-branch
// diamond ending in a PHI.
// NOTE(review): this extract is missing the first line of the signature and
// a few declarations (TII, HasJmp32, the insertion iterator I) — they exist
// in the full source file but were dropped by the extractor.
1030 MachineBasicBlock *BB) const {
1032 DebugLoc DL = MI.getDebugLoc();
1033 unsigned Opc = MI.getOpcode();
// Register-register select variants (RHS is a register operand).
1034 bool isSelectRROp = (Opc == BPF::Select ||
1035 Opc == BPF::Select_64_32 ||
1036 Opc == BPF::Select_32 ||
1037 Opc == BPF::Select_32_64);
1038
1039 bool isMemcpyOp = Opc == BPF::MEMCPY;
1040 bool isLDimm64Op = Opc == BPF::LDIMM64;
1041
// Debug-only sanity check: the only pseudos routed here are the select
// variants, MEMCPY and LDIMM64.
1042#ifndef NDEBUG
1043 bool isSelectRIOp = (Opc == BPF::Select_Ri ||
1044 Opc == BPF::Select_Ri_64_32 ||
1045 Opc == BPF::Select_Ri_32 ||
1046 Opc == BPF::Select_Ri_32_64);
1047
1048 if (!(isSelectRROp || isSelectRIOp || isMemcpyOp || isLDimm64Op))
1049 report_fatal_error("unhandled instruction type: " + Twine(Opc));
1050#endif
1051
// MEMCPY and LDIMM64 have dedicated expanders; everything below handles
// the select pseudos.
1052 if (isMemcpyOp)
1053 return EmitInstrWithCustomInserterMemcpy(MI, BB);
1054
1055 if (isLDimm64Op)
1056 return EmitInstrWithCustomInserterLDimm64(MI, BB);
1057
// 32-bit compares need either the jmp32 instruction class or an explicit
// subregister extension (see below).
1058 bool is32BitCmp = (Opc == BPF::Select_32 ||
1059 Opc == BPF::Select_32_64 ||
1060 Opc == BPF::Select_Ri_32 ||
1061 Opc == BPF::Select_Ri_32_64);
1062
1063 // To "insert" a SELECT instruction, we actually have to insert the diamond
1064 // control-flow pattern. The incoming instruction knows the destination vreg
1065 // to set, the condition code register to branch on, the true/false values to
1066 // select between, and a branch opcode to use.
1067 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1069
1070 // ThisMBB:
1071 // ...
1072 // TrueVal = ...
1073 // jmp_XX r1, r2 goto Copy1MBB
1074 // fallthrough --> Copy0MBB
1075 MachineBasicBlock *ThisMBB = BB;
1076 MachineFunction *F = BB->getParent();
1077 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1078 MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
1079
1080 F->insert(I, Copy0MBB);
1081 F->insert(I, Copy1MBB);
1082 // Update machine-CFG edges by transferring all successors of the current
1083 // block to the new block which will contain the Phi node for the select.
1084 Copy1MBB->splice(Copy1MBB->begin(), BB,
1085 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1086 Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
1087 // Next, add the true and fallthrough blocks as its successors.
1088 BB->addSuccessor(Copy0MBB);
1089 BB->addSuccessor(Copy1MBB);
1090
1091 // Insert Branch if Flag
// Map the ISD condition code to the matching BPF jump opcode, picking the
// rr/ri and 32/64-bit form based on the select variant and jmp32 support.
1092 int CC = MI.getOperand(3).getImm();
1093 int NewCC;
1094 switch (CC) {
1095#define SET_NEWCC(X, Y) \
1096 case ISD::X: \
1097 if (is32BitCmp && HasJmp32) \
1098 NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
1099 else \
1100 NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
1101 break
1102 SET_NEWCC(SETGT, JSGT);
1103 SET_NEWCC(SETUGT, JUGT);
1104 SET_NEWCC(SETGE, JSGE);
1105 SET_NEWCC(SETUGE, JUGE);
1106 SET_NEWCC(SETEQ, JEQ);
1107 SET_NEWCC(SETNE, JNE);
1108 SET_NEWCC(SETLT, JSLT);
1109 SET_NEWCC(SETULT, JULT);
1110 SET_NEWCC(SETLE, JSLE);
1111 SET_NEWCC(SETULE, JULE);
1112 default:
1113 report_fatal_error("unimplemented select CondCode " + Twine(CC));
1114 }
1115
1116 Register LHS = MI.getOperand(1).getReg();
// Signed condition codes select sign-extension rather than zero-extension
// when 32-bit operands must be widened below.
1117 bool isSignedCmp = (CC == ISD::SETGT ||
1118 CC == ISD::SETGE ||
1119 CC == ISD::SETLT ||
1120 CC == ISD::SETLE);
1121
1122 // eBPF at the moment only has 64-bit comparison. Any 32-bit comparison need
1123 // to be promoted, however if the 32-bit comparison operands are destination
1124 // registers then they are implicitly zero-extended already, there is no
1125 // need of explicit zero-extend sequence for them.
1126 //
1127 // We simply do extension for all situations in this method, but we will
1128 // try to remove those unnecessary in BPFMIPeephole pass.
1129 if (is32BitCmp && !HasJmp32)
1130 LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);
1131
1132 if (isSelectRROp) {
1133 Register RHS = MI.getOperand(2).getReg();
1134
1135 if (is32BitCmp && !HasJmp32)
1136 RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);
1137
1138 BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
1139 } else {
1140 int64_t imm32 = MI.getOperand(2).getImm();
1141 // Check before we build J*_ri instruction.
1142 if (!isInt<32>(imm32))
1143 report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
1144 BuildMI(BB, DL, TII.get(NewCC))
1145 .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
1146 }
1147
1148 // Copy0MBB:
1149 // %FalseValue = ...
1150 // # fallthrough to Copy1MBB
1151 BB = Copy0MBB;
1152
1153 // Update machine-CFG edges
1154 BB->addSuccessor(Copy1MBB);
1155
1156 // Copy1MBB:
1157 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
1158 // ...
1159 BB = Copy1MBB;
1160 BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
1161 .addReg(MI.getOperand(5).getReg())
1162 .addMBB(Copy0MBB)
1163 .addReg(MI.getOperand(4).getReg())
1164 .addMBB(ThisMBB);
1165
1166 MI.eraseFromParent(); // The pseudo instruction is gone now.
1167 return BB;
1168}
1169
// Presumably BPFTargetLowering::getSetCCResultType — the opening line of the
// signature is missing from this extract (TODO confirm against the full
// file). SETCC results are i32 when the subtarget has ALU32, i64 otherwise.
1171 EVT VT) const {
1172 return getHasAlu32() ? MVT::i32 : MVT::i64;
1173}
1174
// Presumably BPFTargetLowering::getScalarShiftAmountTy — the opening line of
// the signature is missing from this extract (TODO confirm against the full
// file). Shift amounts stay i32 only for i32 shifts on ALU32 subtargets;
// everything else uses i64.
1176 EVT VT) const {
1177 return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
1178}
1179
1180bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1181 const AddrMode &AM, Type *Ty,
1182 unsigned AS,
1183 Instruction *I) const {
1184 // No global is ever allowed as a base.
1185 if (AM.BaseGV)
1186 return false;
1187
1188 switch (AM.Scale) {
1189 case 0: // "r+i" or just "i", depending on HasBaseReg.
1190 break;
1191 case 1:
1192 if (!AM.HasBaseReg) // allow "r+i".
1193 break;
1194 return false; // disallow "r+r" or "r+r+i".
1195 default:
1196 return false;
1197 }
1198
1199 return true;
1200}
1201
1202bool BPFTargetLowering::shouldSignExtendTypeInLibCall(Type *Ty,
1203 bool IsSigned) const {
1204 return IsSigned || Ty->isIntegerTy(32);
1205}
1206
// Check that the function's return values can be lowered under the BPF
// return-value convention (32-bit convention when ALU32 is available).
// NOTE(review): this extract is missing two lines of the full source — the
// SmallVectorImpl<ISD::OutputArg> &Outs parameter and the RVLocs/Context
// declarations used by CCState below; confirm against the full file.
1207bool BPFTargetLowering::CanLowerReturn(
1208 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
1210 const Type *RetTy) const {
1211 // At minimal return Outs.size() <= 1, or check valid types in CC.
1213 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1214 return CCInfo.CheckReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
1215}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static uint32_t * regMaskFromTemplate(const TargetRegisterInfo *TRI, MachineFunction &MF, const uint32_t *BaseRegMask)
static Function * createBPFUnreachable(Module *M)
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static cl::opt< bool > BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order", cl::Hidden, cl::init(false), cl::desc("Expand memcpy into load/store pairs in order"))
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
static cl::opt< unsigned > BPFMinimumJumpTableEntries("bpf-min-jump-table-entries", cl::init(13), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table on BPF"))
static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask, MCRegister Reg)
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)
#define SET_NEWCC(X, Y)
#define BPF_TRAP
Definition BPF.h:25
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static bool isSigned(unsigned int Opcode)
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
Value * RHS
Value * LHS
unsigned getCommonMaxStoresPerMemFunc() const
bool hasSdivSmod() const
bool getAllowBuiltinCalls() const
bool getAllowsMisalignedMemAccess() const
bool getHasJmpExt() const
const BPFSelectionDAGInfo * getSelectionDAGInfo() const override
bool hasLdsx() const
bool hasGotox() const
bool hasMovsx() const
bool getHasJmp32() const
const BPFRegisterInfo * getRegisterInfo() const override
bool getHasAlu32() const
BPFTargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *) const override
Determine if the target supports unaligned memory accesses.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
BPFTargetLowering(const TargetMachine &TM, const BPFSubtarget &STI)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BasicBlock * getBasicBlock() const
Definition Constants.h:934
CCState - This class holds information needed while lowering arguments and return values.
Register getLocReg() const
LocInfo getLocInfo() const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
LLVM_ABI DISubroutineType * createSubroutineType(DITypeRefArray ParameterTypes, DINode::DIFlags Flags=DINode::FlagZero, unsigned CC=0)
Create subroutine type.
LLVM_ABI DISubprogram * createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine, DINode::DIFlags Flags=DINode::FlagZero, DISubprogram::DISPFlags SPFlags=DISubprogram::SPFlagZero, DITemplateParameterArray TParams=nullptr, DISubprogram *Decl=nullptr, DITypeArray ThrownTypes=nullptr, DINodeArray Annotations=nullptr, StringRef TargetFuncName="", bool UseKeyInstructions=false)
Create a new descriptor for the specified subprogram.
LLVM_ABI DITypeRefArray getOrCreateTypeArray(ArrayRef< Metadata * > Elements)
Get a DITypeRefArray, create one if required.
Subprogram description. Uses SubclassData1.
Type array for a subprogram.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
Diagnostic information for unsupported feature in backend.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition Function.h:687
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:214
void setCallingConv(CallingConv::ID CC)
Definition Function.h:274
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:275
LinkageTypes getLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool hasAddressTaken() const
Test whether this block is used as something other than the target of a terminator,...
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags
Flags values. These may be or'd together.
const GlobalValue * getGlobal() const
const BlockAddress * getBlockAddress() const
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const SDValue & getOperand(unsigned Num) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition Type.h:304
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
self_iterator getIterator()
Definition ilist_node.h:123
A raw_ostream that writes to an std::string.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:807
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:780
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:270
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:771
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:841
@ GlobalAddress
Definition ISDOpcodes.h:88
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:275
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:832
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:779
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:784
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:701
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:838
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:799
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:876
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:844
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:821
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
@ Dead
Unused definition.
@ Define
Register definition.
@ EarlyClobber
Register definition happens before uses.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1150
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
void print(raw_ostream &OS) const
Implement operator<<.
Definition ValueTypes.h:496
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:152
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs