//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that BPF uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "BPFISelLowering.h"
#include "BPF.h"
#include "BPFSubtarget.h"
#include "BPFTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "bpf-lower"

static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
  cl::Hidden, cl::init(false),
  cl::desc("Expand memcpy into load/store pairs in order"));

static cl::opt<unsigned> BPFMinimumJumpTableEntries(
    "bpf-min-jump-table-entries", cl::init(13), cl::Hidden,
    cl::desc("Set minimum number of entries to use a jump table on BPF"));
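
// Both options above are hidden command-line flags; an illustrative llc
// invocation (not from this file) would be:
//   llc -mtriple=bpfel -bpf-expand-memcpy-in-order prog.ll
//   llc -mtriple=bpfel -bpf-min-jump-table-entries=4 prog.ll
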
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
                 SDValue Val = {}) {
  std::string Str;
  if (Val) {
    raw_string_ostream OS(Str);
    Val->print(OS);
    OS << ' ';
  }
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
      MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
}

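// fail() reports an unsupported-feature diagnostic through the LLVMContext
// instead of aborting compilation on the spot. With clang as the driver the
// message typically surfaces as something like (wording approximate):
//   error: in function foo ...: stack arguments are not supported
// so the user can see which function and construct hit the limitation.
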
BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
                                     const BPFSubtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(BPF::R11);

  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  if (!STI.hasGotox())
    setOperationAction(ISD::BRIND, MVT::Other, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  if (STI.hasGotox())
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  // Mark unsupported atomic operations as Custom so we can emit better error
  // messages than the fatal errors SelectionDAG would otherwise produce.
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    if (VT == MVT::i32) {
      if (STI.getHasAlu32())
        continue;
    } else {
      setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    }

    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
  }

  for (auto VT : {MVT::i32, MVT::i64}) {
    setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    if (!STI.hasSdivSmod()) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
    }
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::SHL_PARTS, VT, Expand);
    setOperationAction(ISD::SRL_PARTS, VT, Expand);
    setOperationAction(ISD::SRA_PARTS, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Custom);
  }

  if (STI.getHasAlu32()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Promote);
    setOperationAction(ISD::BR_CC, MVT::i32,
                       STI.getHasJmp32() ? Custom : Promote);
  }

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!STI.hasMovsx()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  }

  // Extended load operations for i1 types must be promoted
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    if (!STI.hasLdsx()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
    }
  }

  setBooleanContents(ZeroOrOneBooleanContent);
  setMaxAtomicSizeInBitsSupported(64);
  setMinimumJumpTableEntries(BPFMinimumJumpTableEntries);

  // Function alignments
  setMinFunctionAlignment(Align(8));
  setPrefFunctionAlignment(Align(8));

  if (BPFExpandMemcpyInOrder) {
    // LLVM generic code will try to expand memcpy into load/store pairs at
    // this stage, which is before quite a few IR optimization passes.
    // Therefore the loads and stores could potentially be moved apart from
    // each other, which will cause trouble for the memcpy pattern matcher
    // inside kernel eBPF JIT compilers.
    //
    // When -bpf-expand-memcpy-in-order is specified, we want to defer the
    // expansion of memcpy to a later stage in the IR optimization pipeline so
    // those load/store pairs won't be touched and can be kept in order.
    // Hence, we set MaxStoresPerMem* to zero to disable the generic
    // getMemcpyLoadsAndStores code path, and ask LLVM to use the target
    // expander EmitTargetCodeForMemcpy.
    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 0;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 0;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 0;
    MaxLoadsPerMemcmp = MaxLoadsPerMemcmpOptSize = 0;
  } else {
    // inline memcpy() for kernel to see explicit copy
    unsigned CommonMaxStores =
        STI.getSelectionDAGInfo()->getCommonMaxStoresPerMemFunc();

    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores;
    MaxLoadsPerMemcmp = MaxLoadsPerMemcmpOptSize = CommonMaxStores;
  }

  // CPU/Feature control
  HasAlu32 = STI.getHasAlu32();
  HasJmp32 = STI.getHasJmp32();
  HasJmpExt = STI.getHasJmpExt();
  HasMovsx = STI.hasMovsx();

  AllowsMisalignedMemAccess = STI.getAllowsMisalignedMemAccess();
}

bool BPFTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align, MachineMemOperand::Flags,
    unsigned *Fast) const {
  // allows-misaligned-mem-access is disabled
  if (!AllowsMisalignedMemAccess)
    return false;

  // only allow misalignment for simple value types
  if (!VT.isSimple())
    return false;

  // always assume fast mode when misalignment is allowed
  if (Fast)
    *Fast = true;

  return true;
}
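
// Illustrative consequence (assumed IR, not from this file): with the
// "allows-misaligned-mem-access" feature enabled, an under-aligned load such
// as
//   %v = load i32, ptr %p, align 1
// is selected as a single 4-byte BPF load instead of being expanded into
// byte loads plus shift/or sequences.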

bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  return false;
}

bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
    MVT MT1 = VT1.getSimpleVT().SimpleTy;
    MVT MT2 = VT2.getSimpleVT().SimpleTy;
    if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
        (MT2 == MVT::i32 || MT2 == MVT::i64))
      return true;
  }
  return TargetLoweringBase::isZExtFree(Val, VT2);
}
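
// Rationale sketch for the hooks above: BPF loads of 8/16/32 bits zero-extend
// into the full 64-bit register, and with ALU32 every 32-bit ALU result
// zeroes the upper 32 bits. A pattern like (illustrative IR)
//   %w = load i32, ptr %p
//   %x = zext i32 %w to i64
// therefore needs no extra instruction, which is what isZExtFree()
// communicates to the optimizer.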

unsigned BPFTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_BlockAddress;
}

BPFTargetLowering::ConstraintType
BPFTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'w':
      return C_RegisterClass;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'r': // GENERAL_REGS
      return std::make_pair(0U, &BPF::GPRRegClass);
    case 'w':
      if (HasAlu32)
        return std::make_pair(0U, &BPF::GPR32RegClass);
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
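
// Illustrative inline-asm usage of these constraints (assumed user code, not
// from this file):
//   long x;
//   asm volatile("%0 = 42" : "=r"(x));  // 'r': 64-bit register r0-r9
//   int y;
//   asm volatile("%0 = 7" : "=w"(y));   // 'w': 32-bit subregister, ALU32 only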

void BPFTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  const char *Msg;
  uint32_t Opcode = N->getOpcode();
  switch (Opcode) {
  default:
    report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
      Msg = "unsupported atomic operation, please use 32/64 bit version";
    else
      Msg = "unsupported atomic operation, please use 64 bit version";
    break;
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
    return;
  }

  SDLoc DL(N);
  // We'll still produce a fatal error downstream, but this diagnostic is more
  // user-friendly.
  fail(DL, DAG, Msg);
}
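
// Example of what this catches (assumed C source, not from this file): a
// sub-word atomic RMW such as
//   __sync_fetch_and_or(&val16, 0x1);   /* 16-bit atomic or */
// has no BPF encoding, so instead of a bare SelectionDAG fatal error the
// user gets the "unsupported atomic operation" diagnostic pointing at the
// wider 32/64-bit versions.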

SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SDIV:
  case ISD::SREM:
    return LowerSDIVSREM(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
    return LowerATOMIC_LOAD_STORE(Op, DAG);
  case ISD::TRAP:
    return LowerTRAP(Op, DAG);
  }
}

// Calling Convention Implementation
#include "BPFGenCallingConv.inc"

SDValue BPFTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  bool HasMemArgs = false;
  for (size_t I = 0; I < ArgLocs.size(); ++I) {
    auto &VA = ArgLocs[I];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
      switch (SimpleTy) {
      default: {
        std::string Str;
        {
          raw_string_ostream OS(Str);
          RegVT.print(OS);
        }
        report_fatal_error("unhandled argument type: " + Twine(Str));
      }
      case MVT::i32:
      case MVT::i64:
        Register VReg = RegInfo.createVirtualRegister(
            SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);

        // If this is a value that has been promoted to a wider type, insert an
        // assert[sz]ext to capture this, then truncate to the right size.
        if (VA.getLocInfo() == CCValAssign::SExt)
          ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));

        if (VA.getLocInfo() != CCValAssign::Full)
          ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);

        InVals.push_back(ArgValue);

        break;
      }
    } else {
      if (VA.isMemLoc())
        HasMemArgs = true;
      else
        report_fatal_error("unhandled argument location");
      InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
    }
  }
  if (HasMemArgs)
    fail(DL, DAG, "stack arguments are not supported");
  if (IsVarArg)
    fail(DL, DAG, "variadic functions are not supported");
  if (MF.getFunction().hasStructRetAttr())
    fail(DL, DAG, "aggregate returns are not supported");

  return Chain;
}

const size_t BPFTargetLowering::MaxArgs = 5;
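
// BPF passes the first five arguments in registers R1-R5; there are no
// register slots beyond that, which is why LowerFormalArguments() above
// rejects stack-located arguments and LowerCall() below rejects calls with
// more than MaxArgs arguments.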

static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask,
                            MCRegister Reg) {
  for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
    RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
}

static uint32_t *regMaskFromTemplate(const TargetRegisterInfo *TRI,
                                     MachineFunction &MF,
                                     const uint32_t *BaseRegMask) {
  uint32_t *RegMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
  memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);
  return RegMask;
}

SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // The BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("unsupported calling convention: " + Twine(CallConv));
  case CallingConv::Fast:
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getStackSize();

  if (Outs.size() > MaxArgs)
    fail(CLI.DL, DAG, "too many arguments", Callee);

  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;
    fail(CLI.DL, DAG, "pass by value not supported", Callee);
    break;
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;

  // Walk arg assignments
  for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue &Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into RegsToPass vector
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    else
      report_fatal_error("stack arguments are not supported");
  }

  SDValue InGlue;

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InGlue
  // is necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    if (StringRef(E->getSymbol()) != BPF_TRAP) {
      Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
      fail(CLI.DL, DAG,
           Twine("A call to built-in function '" + StringRef(E->getSymbol()) +
                 "' is not supported."));
    }
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  bool HasFastCall =
      (CLI.CB && isa<CallInst>(CLI.CB) && CLI.CB->hasFnAttr("bpf_fastcall"));
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (HasFastCall) {
    uint32_t *RegMask = regMaskFromTemplate(
        TRI, MF, TRI->getCallPreservedMask(MF, CallingConv::PreserveAll));
    for (auto const &RegPair : RegsToPass)
      resetRegMaskBit(TRI, RegMask, RegPair.first);
    if (!CLI.CB->getType()->isVoidTy())
      resetRegMaskBit(TRI, RegMask, BPF::R0);
    Ops.push_back(DAG.getRegisterMask(RegMask));
  } else {
    Ops.push_back(
        DAG.getRegisterMask(TRI->getCallPreservedMask(MF, CLI.CallConv)));
  }

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
  InGlue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}
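
// Note on the "bpf_fastcall" path above: for calls carrying that function
// attribute the register mask starts from the PreserveAll template, i.e. the
// callee is assumed to preserve (almost) all registers, and only the actual
// argument registers plus, for non-void calls, the return register R0 are
// carved out as clobbered. This lets the register allocator keep values live
// across such calls instead of spilling around them.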

SDValue
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_GLUE;

  // CCValAssign - represent the assignment of the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (MF.getFunction().getReturnType()->isAggregateType()) {
    fail(DL, DAG, "aggregate returns are not supported");
    return DAG.getNode(Opc, DL, MVT::Other, Chain);
  }

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (size_t i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      report_fatal_error("stack return values are not supported");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (Ins.size() > 1) {
    fail(DL, DAG, "only small returns supported");
    for (auto &In : Ins)
      InVals.push_back(DAG.getConstant(0, DL, In.VT));
    return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InGlue).getValue(1);
  }

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physreg.
  for (auto &Val : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InGlue).getValue(1);
    InGlue = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETLT:
  case ISD::SETLE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}
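
// Example: without the extended jump instructions (no JLT/JLE/JSLT/JSLE on
// older BPF ISAs), a comparison like (a < b) cannot be branched on directly,
// so NegateCC rewrites SETLT(a, b) into the swapped-operand form SETGT(b, a),
// which maps onto the always-available JSGT instruction.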

SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  fail(DL, DAG,
       "unsupported signed division, please convert to unsigned div/mod.");
  return DAG.getUNDEF(Op->getValueType(0));
}

SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  fail(DL, DAG, "unsupported dynamic stack allocation");
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
                     DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
}

SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};

  return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);
}

SDValue BPFTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDLoc DL(N);

  if (cast<AtomicSDNode>(N)->getMergedOrdering() ==
      AtomicOrdering::SequentiallyConsistent)
    fail(DL, DAG,
         "sequentially consistent (seq_cst) "
         "atomic load/store is not supported");

  return Op;
}

static Function *createBPFUnreachable(Module *M) {
  if (auto *Fn = M->getFunction(BPF_TRAP))
    return Fn;

  FunctionType *FT = FunctionType::get(Type::getVoidTy(M->getContext()), false);
  Function *NewF =
      Function::Create(FT, GlobalValue::ExternalWeakLinkage, BPF_TRAP, M);
  NewF->setDSOLocal(true);
  NewF->setCallingConv(CallingConv::C);
  NewF->setSection(".ksyms");

  if (M->debug_compile_units().empty())
    return NewF;

  DIBuilder DBuilder(*M);
  DITypeRefArray ParamTypes =
      DBuilder.getOrCreateTypeArray({nullptr /*void return*/});
  DISubroutineType *FuncType = DBuilder.createSubroutineType(ParamTypes);
  DICompileUnit *CU = *M->debug_compile_units_begin();
  DISubprogram *SP =
      DBuilder.createFunction(CU, BPF_TRAP, BPF_TRAP, nullptr, 0, FuncType, 0,
                              DINode::FlagZero, DISubprogram::SPFlagZero);
  NewF->setSubprogram(SP);
  return NewF;
}
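
// The helper above lazily declares the BPF_TRAP symbol (defined in BPF.h) as
// a weak, dso-local external function placed in the ".ksyms" section, the
// convention BPF objects use for referencing kernel functions (kfuncs). When
// the module carries debug info, a matching DISubprogram is attached so BTF
// generation can describe the call target.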

SDValue BPFTargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  TargetLowering::CallLoweringInfo CLI(DAG);
  SmallVector<SDValue> InVals;
  SDNode *N = Op.getNode();
  SDLoc DL(N);

  Function *Fn = createBPFUnreachable(MF.getFunction().getParent());
  auto PtrVT = getPointerTy(MF.getDataLayout());
  CLI.Callee = DAG.getTargetGlobalAddress(Fn, DL, PtrVT);
  CLI.Chain = N->getOperand(0);
  CLI.IsTailCall = false;
  CLI.CallConv = CallingConv::C;
  CLI.IsVarArg = false;
  CLI.DL = DL;
  CLI.NoMerge = false;
  CLI.DoesNotReturn = true;
  return LowerCall(CLI, InVals);
}

SDValue BPFTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
  return getAddr(N, DAG);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

static SDValue getTargetNode(JumpTableSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}

template <class NodeTy>
SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                   unsigned Flags) const {
  SDLoc DL(N);

  SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);

  return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
}

SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  if (N->getOffset() != 0)
    report_fatal_error("invalid offset for global address: " +
                       Twine(N->getOffset()));

  const GlobalValue *GVal = N->getGlobal();
  SDLoc DL(Op);

  // Wrap it in a TargetGlobalAddress
  SDValue Addr = DAG.getTargetGlobalAddress(GVal, DL, MVT::i64);

  // Emit pseudo instruction
  return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
}

SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue BPFTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc DL(Op);

  // Wrap it in a TargetBlockAddress
  SDValue Addr = DAG.getTargetBlockAddress(BA, MVT::i64);

  // Emit pseudo instruction
  return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
}

unsigned
BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Reg, bool isSigned) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
  MachineFunction *F = BB->getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineRegisterInfo &RegInfo = F->getRegInfo();

  if (!isSigned) {
    Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    return PromotedReg0;
  }
  Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
  if (HasMovsx) {
    BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg2).addReg(Reg);
  } else {
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
        .addReg(PromotedReg0).addImm(32);
    BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
        .addReg(PromotedReg1).addImm(32);
  }

  return PromotedReg2;
}
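
// For the non-movsx signed case above, the emitted sequence is the classic
// shift pair (illustrative BPF assembly):
//   r1 = w2        ; MOV_32_64, zero-extends the 32-bit value
//   r1 <<= 32      ; SLL_ri
//   r1 s>>= 32     ; SRA_ri, the arithmetic shift restores the sign bits
// while the unsigned case needs only the first move, since MOV_32_64 already
// zero-extends.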

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
                                                     MachineBasicBlock *BB)
    const {
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);
  unsigned ScratchReg;

  // This function does custom insertion during lowering BPFISD::MEMCPY which
  // only has two register operands from memcpy semantics, the copy source
  // address and the copy destination address.
  //
  // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
  // a third scratch register to serve as the destination register of the
  // loads and the source register of the stores.
  //
  // The scratch register here carries the Define | Dead | EarlyClobber flags.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction. The Define flag is
  // needed to convince the machine verifier that an Undef value isn't a
  // problem, as we are loading memory into the register anyway. The Dead flag
  // is needed because the value in the scratch register isn't supposed to be
  // used by any other instruction.
  ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
  MIB.addReg(ScratchReg,
             RegState::Define | RegState::Dead | RegState::EarlyClobber);

  return BB;
}

MachineBasicBlock *BPFTargetLowering::EmitInstrWithCustomInserterLDimm64(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const BPFInstrInfo *TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  // Build an address-taken map for GlobalVariables and BlockAddresses.
  DenseMap<const BasicBlock *, MachineBasicBlock *> AddressTakenBBs;
  for (MachineBasicBlock &MBB : *MF) {
    if (const BasicBlock *BB = MBB.getBasicBlock())
      if (BB->hasAddressTaken())
        AddressTakenBBs[BB] = &MBB;
  }

  MachineOperand &MO = MI.getOperand(1);
  assert(MO.isBlockAddress() || MO.isGlobal());

  Register ResultReg = MI.getOperand(0).getReg();
  Register TmpReg = RegInfo.createVirtualRegister(RC);

  std::vector<MachineBasicBlock *> Targets;
  unsigned JTI;

  if (MO.isBlockAddress()) {
    auto *BA = MO.getBlockAddress();
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);

    Targets.push_back(TgtMBB);
    JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
              ->createJumpTableIndex(Targets);

    BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), TmpReg)
        .addJumpTableIndex(JTI);
    BuildMI(*BB, MI, DL, TII->get(BPF::LDD), ResultReg)
        .addReg(TmpReg)
        .addImm(0);
    MI.eraseFromParent();
    return BB;
  }

  // Helper: emit LD_imm64 with a GlobalAddress or JumpTable operand.
  auto emitLDImm64 = [&](const GlobalValue *GV = nullptr, unsigned JTI = -1) {
    auto MIB = BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), ResultReg);
    if (GV)
      MIB.addGlobalAddress(GV);
    else
      MIB.addJumpTableIndex(JTI);
    MI.eraseFromParent();
    return BB;
  };

  // Must be a global at this point.
  const GlobalValue *GVal = MO.getGlobal();
  const auto *GV = dyn_cast<GlobalVariable>(GVal);

  if (!GV || GV->getLinkage() != GlobalValue::PrivateLinkage ||
      !GV->isConstant() || !GV->hasInitializer())
    return emitLDImm64(GVal);

  const auto *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return emitLDImm64(GVal);

  for (const Use &Op : CA->operands()) {
    if (!isa<BlockAddress>(Op))
      return emitLDImm64(GVal);
    auto *BA = cast<BlockAddress>(Op);
    MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
    assert(TgtMBB);
    Targets.push_back(TgtMBB);
  }

  JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
            ->createJumpTableIndex(Targets);
  return emitLDImm64(nullptr, JTI);
}
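
// Typical source pattern reaching the jump-table path above (assumed C code,
// not from this file): a computed goto over a private constant array of
// block addresses,
//   static const void *const tbl[] = {&&L0, &&L1, &&L2};
//   goto *tbl[idx];
// The initializer is a ConstantArray of BlockAddress entries, which is
// re-emitted here as a machine jump table so the indirect branch (gotox) has
// concrete MachineBasicBlock targets.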

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  bool isSelectRROp = (Opc == BPF::Select ||
                       Opc == BPF::Select_64_32 ||
                       Opc == BPF::Select_32 ||
                       Opc == BPF::Select_32_64);

  bool isMemcpyOp = Opc == BPF::MEMCPY;
  bool isLDimm64Op = Opc == BPF::LDIMM64;

#ifndef NDEBUG
  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
                       Opc == BPF::Select_Ri_64_32 ||
                       Opc == BPF::Select_Ri_32 ||
                       Opc == BPF::Select_Ri_32_64);

  if (!(isSelectRROp || isSelectRIOp || isMemcpyOp || isLDimm64Op))
    report_fatal_error("unhandled instruction type: " + Twine(Opc));
#endif

  if (isMemcpyOp)
    return EmitInstrWithCustomInserterMemcpy(MI, BB);

  if (isLDimm64Op)
    return EmitInstrWithCustomInserterLDimm64(MI, BB);

  bool is32BitCmp = (Opc == BPF::Select_32 ||
                     Opc == BPF::Select_32_64 ||
                     Opc == BPF::Select_Ri_32 ||
                     Opc == BPF::Select_Ri_32_64);

  // To "insert" a SELECT instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  // ThisMBB:
  // ...
  // TrueVal = ...
  // jmp_XX r1, r2 goto Copy1MBB
  // fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, Copy0MBB);
  F->insert(I, Copy1MBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  Copy1MBB->splice(Copy1MBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(Copy0MBB);
  BB->addSuccessor(Copy1MBB);

  // Insert Branch if Flag
  int CC = MI.getOperand(3).getImm();
  int NewCC;
  switch (CC) {
#define SET_NEWCC(X, Y)                                                        \
  case ISD::X:                                                                 \
    if (is32BitCmp && HasJmp32)                                                \
      NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32;                  \
    else                                                                       \
      NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri;                        \
    break
  SET_NEWCC(SETGT, JSGT);
  SET_NEWCC(SETUGT, JUGT);
  SET_NEWCC(SETGE, JSGE);
  SET_NEWCC(SETUGE, JUGE);
  SET_NEWCC(SETEQ, JEQ);
  SET_NEWCC(SETNE, JNE);
  SET_NEWCC(SETLT, JSLT);
  SET_NEWCC(SETULT, JULT);
  SET_NEWCC(SETLE, JSLE);
  SET_NEWCC(SETULE, JULE);
  default:
    report_fatal_error("unimplemented select CondCode " + Twine(CC));
  }

  Register LHS = MI.getOperand(1).getReg();
  bool isSignedCmp = (CC == ISD::SETGT ||
                      CC == ISD::SETGE ||
                      CC == ISD::SETLT ||
                      CC == ISD::SETLE);

  // eBPF at the moment only has 64-bit comparisons. Any 32-bit comparison
  // needs to be promoted; however, if the 32-bit comparison operands are
  // destination registers, they are already implicitly zero-extended, so no
  // explicit zero-extension sequence is needed for them.
  //
  // We simply do the extension in all situations in this method, and try to
  // remove the unnecessary ones in the BPFMIPeephole pass.
  if (is32BitCmp && !HasJmp32)
    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

  if (isSelectRROp) {
    Register RHS = MI.getOperand(2).getReg();

    if (is32BitCmp && !HasJmp32)
      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
  } else {
    int64_t imm32 = MI.getOperand(2).getImm();
    // Check before we build the J*_ri instruction.
    if (!isInt<32>(imm32))
      report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
    BuildMI(BB, DL, TII.get(NewCC))
        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
  }

  // Copy0MBB:
  // %FalseValue = ...
  // # fallthrough to Copy1MBB
  BB = Copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(Copy1MBB);

  // Copy1MBB:
  // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  // ...
  BB = Copy1MBB;
  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

EVT BPFTargetLowering::getSetCCResultType(const DataLayout &DL,
                                          LLVMContext &Context,
                                          EVT VT) const {
  return getHasAlu32() ? MVT::i32 : MVT::i64;
}

MVT BPFTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                              EVT VT) const {
  return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
}

bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}
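
// Examples of the rules above (illustrative): a BPF memory operand encodes
// only a base register plus a signed 16-bit displacement, so
//   [r1 + 8]        -> legal     (reg + imm)
//   [r1 + r2]       -> not legal (reg + reg)
//   [r1 + 4*r2 + 8] -> not legal (scaled index)
// and LLVM's addressing-mode matcher is told to keep such forms out of
// address computation.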