LLVM 23.0.0git
BPFISelLowering.cpp
Go to the documentation of this file.
//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that BPF uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
13
14#include "BPFISelLowering.h"
15#include "BPF.h"
16#include "BPFSubtarget.h"
25#include "llvm/IR/DIBuilder.h"
28#include "llvm/IR/Module.h"
29#include "llvm/Support/Debug.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE "bpf-lower"
37
38static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
39 cl::Hidden, cl::init(false),
40 cl::desc("Expand memcpy into load/store pairs in order"));
41
43 "bpf-min-jump-table-entries", cl::init(13), cl::Hidden,
44 cl::desc("Set minimum number of entries to use a jump table on BPF"));
45
46static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
47 SDValue Val = {}) {
48 std::string Str;
49 if (Val) {
50 raw_string_ostream OS(Str);
51 Val->print(OS);
52 OS << ' ';
53 }
56 MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
57}
58
60 const BPFSubtarget &STI)
61 : TargetLowering(TM, STI) {
62
63 // Set up the register classes.
64 addRegisterClass(MVT::i64, &BPF::GPRRegClass);
65 if (STI.getHasAlu32())
66 addRegisterClass(MVT::i32, &BPF::GPR32RegClass);
67
68 // Compute derived properties from the register classes
70
72
76
77 if (!STI.hasGotox())
79
81
83 if (STI.hasGotox())
85
89
90 // Set unsupported atomic operations as Custom so
91 // we can emit better error messages than fatal error
92 // from selectiondag.
93 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
94 if (VT == MVT::i32) {
95 if (STI.getHasAlu32())
96 continue;
97 } else {
99 }
100
106 }
107
108 for (auto VT : {MVT::i32, MVT::i64}) {
111 }
112
113 for (auto VT : { MVT::i32, MVT::i64 }) {
114 if (VT == MVT::i32 && !STI.getHasAlu32())
115 continue;
116
119 if (!STI.hasSdivSmod()) {
122 }
137
141 }
142
143 if (STI.getHasAlu32()) {
146 STI.getHasJmp32() ? Custom : Promote);
147 }
148
150 if (!STI.hasMovsx()) {
154 }
155
156 // Extended load operations for i1 types must be promoted
157 for (MVT VT : MVT::integer_valuetypes()) {
161
162 if (!STI.hasLdsx()) {
164 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
165 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
166 }
167 }
168
172
173 // Function alignments
176
178 // LLVM generic code will try to expand memcpy into load/store pairs at this
179 // stage which is before quite a few IR optimization passes, therefore the
180 // loads and stores could potentially be moved apart from each other which
181 // will cause trouble to memcpy pattern matcher inside kernel eBPF JIT
182 // compilers.
183 //
184 // When -bpf-expand-memcpy-in-order specified, we want to defer the expand
185 // of memcpy to later stage in IR optimization pipeline so those load/store
186 // pairs won't be touched and could be kept in order. Hence, we set
187 // MaxStoresPerMem* to zero to disable the generic getMemcpyLoadsAndStores
188 // code path, and ask LLVM to use target expander EmitTargetCodeForMemcpy.
193 } else {
194 // inline memcpy() for kernel to see explicit copy
195 unsigned CommonMaxStores =
197
202 }
203
204 // CPU/Feature control
205 HasAlu32 = STI.getHasAlu32();
206 HasJmp32 = STI.getHasJmp32();
207 HasJmpExt = STI.getHasJmpExt();
208 HasMovsx = STI.hasMovsx();
209
210 AllowsMisalignedMemAccess = STI.getAllowsMisalignedMemAccess();
211}
212
215 unsigned *Fast) const {
216 // allows-misaligned-mem-access is disabled
217 if (!AllowsMisalignedMemAccess)
218 return false;
219
220 // only allow misalignment for simple value types
221 if (!VT.isSimple())
222 return false;
223
224 // always assume fast mode when misalignment is allowed
225 if (Fast)
226 *Fast = true;
227
228 return true;
229}
230
232 return false;
233}
234
235bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
236 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
237 return false;
238 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
239 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
240 return NumBits1 > NumBits2;
241}
242
243bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
244 if (!VT1.isInteger() || !VT2.isInteger())
245 return false;
246 unsigned NumBits1 = VT1.getSizeInBits();
247 unsigned NumBits2 = VT2.getSizeInBits();
248 return NumBits1 > NumBits2;
249}
250
251bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
252 if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
253 return false;
254 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
255 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
256 return NumBits1 == 32 && NumBits2 == 64;
257}
258
259bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
260 if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
261 return false;
262 unsigned NumBits1 = VT1.getSizeInBits();
263 unsigned NumBits2 = VT2.getSizeInBits();
264 return NumBits1 == 32 && NumBits2 == 64;
265}
266
267bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
268 EVT VT1 = Val.getValueType();
269 if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
270 MVT MT1 = VT1.getSimpleVT().SimpleTy;
271 MVT MT2 = VT2.getSimpleVT().SimpleTy;
272 if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
273 (MT2 == MVT::i32 || MT2 == MVT::i64))
274 return true;
275 }
276 return TargetLoweringBase::isZExtFree(Val, VT2);
277}
278
282
285 if (Constraint.size() == 1) {
286 switch (Constraint[0]) {
287 default:
288 break;
289 case 'w':
290 return C_RegisterClass;
291 }
292 }
293
294 return TargetLowering::getConstraintType(Constraint);
295}
296
297std::pair<unsigned, const TargetRegisterClass *>
299 StringRef Constraint,
300 MVT VT) const {
301 if (Constraint.size() == 1) {
302 // GCC Constraint Letters
303 switch (Constraint[0]) {
304 case 'r': // GENERAL_REGS
305 return std::make_pair(0U, &BPF::GPRRegClass);
306 case 'w':
307 if (HasAlu32)
308 return std::make_pair(0U, &BPF::GPR32RegClass);
309 break;
310 default:
311 break;
312 }
313 }
314
316}
317
318void BPFTargetLowering::ReplaceNodeResults(
320 const char *Msg;
321 uint32_t Opcode = N->getOpcode();
322 switch (Opcode) {
323 default:
324 report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
329 case ISD::ATOMIC_SWAP:
331 if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
332 Msg = "unsupported atomic operation, please use 32/64 bit version";
333 else
334 Msg = "unsupported atomic operation, please use 64 bit version";
335 break;
336 case ISD::ATOMIC_LOAD:
338 return;
339 }
340
341 SDLoc DL(N);
342 // We'll still produce a fatal error downstream, but this diagnostic is more
343 // user-friendly.
344 fail(DL, DAG, Msg);
345}
346
348 switch (Op.getOpcode()) {
349 default:
350 report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
351 case ISD::BR_CC:
352 return LowerBR_CC(Op, DAG);
353 case ISD::JumpTable:
354 return LowerJumpTable(Op, DAG);
356 return LowerGlobalAddress(Op, DAG);
358 return LowerConstantPool(Op, DAG);
360 return LowerBlockAddress(Op, DAG);
361 case ISD::SELECT_CC:
362 return LowerSELECT_CC(Op, DAG);
363 case ISD::SDIV:
364 case ISD::SREM:
365 return LowerSDIVSREM(Op, DAG);
366 case ISD::SHL_PARTS:
367 case ISD::SRL_PARTS:
368 case ISD::SRA_PARTS:
369 return LowerShiftParts(Op, DAG);
371 return LowerDYNAMIC_STACKALLOC(Op, DAG);
372 case ISD::ATOMIC_LOAD:
374 return LowerATOMIC_LOAD_STORE(Op, DAG);
375 case ISD::TRAP:
376 return LowerTRAP(Op, DAG);
377 }
378}
379
380// Calling Convention Implementation
381#include "BPFGenCallingConv.inc"
382
383SDValue BPFTargetLowering::LowerFormalArguments(
384 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
385 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
386 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
387 switch (CallConv) {
388 default:
389 report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
390 case CallingConv::C:
392 break;
393 }
394
397
398 // Assign locations to all of the incoming arguments.
400 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
401 CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);
402
403 bool HasMemArgs = false;
404 for (size_t I = 0; I < ArgLocs.size(); ++I) {
405 auto &VA = ArgLocs[I];
406
407 if (VA.isRegLoc()) {
408 // Arguments passed in registers
409 EVT RegVT = VA.getLocVT();
410 MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
411 switch (SimpleTy) {
412 default: {
413 std::string Str;
414 {
415 raw_string_ostream OS(Str);
416 RegVT.print(OS);
417 }
418 report_fatal_error("unhandled argument type: " + Twine(Str));
419 }
420 case MVT::i32:
421 case MVT::i64:
422 Register VReg = RegInfo.createVirtualRegister(
423 SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
424 RegInfo.addLiveIn(VA.getLocReg(), VReg);
425 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
426
427 // If this is an value that has been promoted to wider types, insert an
428 // assert[sz]ext to capture this, then truncate to the right size.
429 if (VA.getLocInfo() == CCValAssign::SExt)
430 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
431 DAG.getValueType(VA.getValVT()));
432 else if (VA.getLocInfo() == CCValAssign::ZExt)
433 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
434 DAG.getValueType(VA.getValVT()));
435
436 if (VA.getLocInfo() != CCValAssign::Full)
437 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
438
439 InVals.push_back(ArgValue);
440
441 break;
442 }
443 } else {
444 if (VA.isMemLoc())
445 HasMemArgs = true;
446 else
447 report_fatal_error("unhandled argument location");
448 InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
449 }
450 }
451 if (HasMemArgs)
452 fail(DL, DAG, "stack arguments are not supported");
453 if (IsVarArg)
454 fail(DL, DAG, "variadic functions are not supported");
455 return Chain;
456}
457
458const size_t BPFTargetLowering::MaxArgs = 5;
459
460static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask,
461 MCRegister Reg) {
462 for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
463 RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
464}
465
467 MachineFunction &MF,
468 const uint32_t *BaseRegMask) {
469 uint32_t *RegMask = MF.allocateRegMask();
470 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
471 memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);
472 return RegMask;
473}
474
475SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
476 SmallVectorImpl<SDValue> &InVals) const {
477 SelectionDAG &DAG = CLI.DAG;
478 auto &Outs = CLI.Outs;
479 auto &OutVals = CLI.OutVals;
480 auto &Ins = CLI.Ins;
481 SDValue Chain = CLI.Chain;
482 SDValue Callee = CLI.Callee;
483 bool &IsTailCall = CLI.IsTailCall;
484 CallingConv::ID CallConv = CLI.CallConv;
485 bool IsVarArg = CLI.IsVarArg;
486 MachineFunction &MF = DAG.getMachineFunction();
487
488 // BPF target does not support tail call optimization.
489 IsTailCall = false;
490
491 switch (CallConv) {
492 default:
493 report_fatal_error("unsupported calling convention: " + Twine(CallConv));
495 case CallingConv::C:
496 break;
497 }
498
499 // Analyze operands of the call, assigning locations to each operand.
501 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
502
503 CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);
504
505 unsigned NumBytes = CCInfo.getStackSize();
506
507 if (Outs.size() > MaxArgs)
508 fail(CLI.DL, DAG, "too many arguments", Callee);
509
510 for (auto &Arg : Outs) {
511 ISD::ArgFlagsTy Flags = Arg.Flags;
512 if (!Flags.isByVal())
513 continue;
514 fail(CLI.DL, DAG, "pass by value not supported", Callee);
515 break;
516 }
517
518 auto PtrVT = getPointerTy(MF.getDataLayout());
519 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
520
521 SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;
522
523 // Walk arg assignments
524 for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {
525 CCValAssign &VA = ArgLocs[i];
526 SDValue &Arg = OutVals[i];
527
528 // Promote the value if needed.
529 switch (VA.getLocInfo()) {
530 default:
531 report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
533 break;
535 Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
536 break;
538 Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
539 break;
541 Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
542 break;
543 }
544
545 // Push arguments into RegsToPass vector
546 if (VA.isRegLoc())
547 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
548 else
549 report_fatal_error("stack arguments are not supported");
550 }
551
552 SDValue InGlue;
553
554 // Build a sequence of copy-to-reg nodes chained together with token chain and
555 // flag operands which copy the outgoing args into registers. The InGlue in
556 // necessary since all emitted instructions must be stuck together.
557 for (auto &Reg : RegsToPass) {
558 Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
559 InGlue = Chain.getValue(1);
560 }
561
562 // If the callee is a GlobalAddress node (quite common, every direct call is)
563 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
564 // Likewise ExternalSymbol -> TargetExternalSymbol.
565 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
566 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
567 G->getOffset(), 0);
568 } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
569 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
570 StringRef Sym = E->getSymbol();
571 if (Sym != BPF_TRAP && Sym != "__multi3" && Sym != "__divti3" &&
572 Sym != "__modti3" && Sym != "__udivti3" && Sym != "__umodti3" &&
573 Sym != "memcpy" && Sym != "memset" && Sym != "memmove")
574 fail(
575 CLI.DL, DAG,
576 Twine("A call to built-in function '" + Sym + "' is not supported."));
577 }
578
579 // Returns a chain & a flag for retval copy to use.
580 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
582 Ops.push_back(Chain);
583 Ops.push_back(Callee);
584
585 // Add argument registers to the end of the list so that they are
586 // known live into the call.
587 for (auto &Reg : RegsToPass)
588 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
589
590 bool HasFastCall =
591 (CLI.CB && isa<CallInst>(CLI.CB) && CLI.CB->hasFnAttr("bpf_fastcall"));
592 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
593 if (HasFastCall) {
594 uint32_t *RegMask = regMaskFromTemplate(
595 TRI, MF, TRI->getCallPreservedMask(MF, CallingConv::PreserveAll));
596 for (auto const &RegPair : RegsToPass)
597 resetRegMaskBit(TRI, RegMask, RegPair.first);
598 if (!CLI.CB->getType()->isVoidTy())
599 resetRegMaskBit(TRI, RegMask, BPF::R0);
600 Ops.push_back(DAG.getRegisterMask(RegMask));
601 } else {
602 Ops.push_back(
603 DAG.getRegisterMask(TRI->getCallPreservedMask(MF, CLI.CallConv)));
604 }
605
606 if (InGlue.getNode())
607 Ops.push_back(InGlue);
608
609 Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
610 InGlue = Chain.getValue(1);
611
612 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
613
614 // Create the CALLSEQ_END node.
615 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
616 InGlue = Chain.getValue(1);
617
618 // Handle result values, copying them out of physregs into vregs that we
619 // return.
620 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
621 InVals);
622}
623
625BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
626 bool IsVarArg,
628 const SmallVectorImpl<SDValue> &OutVals,
629 const SDLoc &DL, SelectionDAG &DAG) const {
630 unsigned Opc = BPFISD::RET_GLUE;
631
632 // CCValAssign - represent the assignment of the return value to a location
634 MachineFunction &MF = DAG.getMachineFunction();
635
636 // CCState - Info about the registers and stack slot.
637 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
638
639 // Analize return values.
640 CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
641
642 SDValue Glue;
643 SmallVector<SDValue, 4> RetOps(1, Chain);
644
645 // Copy the result values into the output registers.
646 for (size_t i = 0; i != RVLocs.size(); ++i) {
647 CCValAssign &VA = RVLocs[i];
648 if (!VA.isRegLoc())
649 report_fatal_error("stack return values are not supported");
650
651 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);
652
653 // Guarantee that all emitted copies are stuck together,
654 // avoiding something bad.
655 Glue = Chain.getValue(1);
656 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
657 }
658
659 RetOps[0] = Chain; // Update chain.
660
661 // Add the glue if we have it.
662 if (Glue.getNode())
663 RetOps.push_back(Glue);
664
665 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
666}
667
668SDValue BPFTargetLowering::LowerCallResult(
669 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
670 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
671 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
672
673 MachineFunction &MF = DAG.getMachineFunction();
674 // Assign locations to each value returned by this call.
676 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
677
678 CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
679
680 // Copy all of the result registers out of their specified physreg.
681 for (auto &Val : RVLocs) {
682 Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
683 Val.getValVT(), InGlue).getValue(1);
684 InGlue = Chain.getValue(2);
685 InVals.push_back(Chain.getValue(0));
686 }
687
688 return Chain;
689}
690
692 switch (CC) {
693 default:
694 break;
695 case ISD::SETULT:
696 case ISD::SETULE:
697 case ISD::SETLT:
698 case ISD::SETLE:
700 std::swap(LHS, RHS);
701 break;
702 }
703}
704
705SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
706 SDLoc DL(Op);
707 fail(DL, DAG,
708 "unsupported signed division, please convert to unsigned div/mod.");
709 return DAG.getUNDEF(Op->getValueType(0));
710}
711
712SDValue BPFTargetLowering::LowerShiftParts(SDValue Op,
713 SelectionDAG &DAG) const {
714 SDValue Lo, Hi;
715 expandShiftParts(Op.getNode(), Lo, Hi, DAG);
716 return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
717}
718
719SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
720 SelectionDAG &DAG) const {
721 SDLoc DL(Op);
722 fail(DL, DAG, "unsupported dynamic stack allocation");
723 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
724 return DAG.getMergeValues(Ops, SDLoc());
725}
726
727SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
728 SDValue Chain = Op.getOperand(0);
729 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
730 SDValue LHS = Op.getOperand(2);
731 SDValue RHS = Op.getOperand(3);
732 SDValue Dest = Op.getOperand(4);
733 SDLoc DL(Op);
734
735 if (!getHasJmpExt())
736 NegateCC(LHS, RHS, CC);
737
738 return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
739 DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
740}
741
742SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
743 SDValue LHS = Op.getOperand(0);
744 SDValue RHS = Op.getOperand(1);
745 SDValue TrueV = Op.getOperand(2);
746 SDValue FalseV = Op.getOperand(3);
747 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
748 SDLoc DL(Op);
749
750 if (!getHasJmpExt())
751 NegateCC(LHS, RHS, CC);
752
753 SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
754 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
755
756 return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);
757}
758
759SDValue BPFTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
760 SelectionDAG &DAG) const {
761 SDNode *N = Op.getNode();
762 SDLoc DL(N);
763
764 if (cast<AtomicSDNode>(N)->getMergedOrdering() ==
766 fail(DL, DAG,
767 "sequentially consistent (seq_cst) "
768 "atomic load/store is not supported");
769
770 return Op;
771}
772
774 if (auto *Fn = M->getFunction(BPF_TRAP))
775 return Fn;
776
777 FunctionType *FT = FunctionType::get(Type::getVoidTy(M->getContext()), false);
778 Function *NewF =
780 NewF->setDSOLocal(true);
782 NewF->setSection(".ksyms");
783
784 if (M->debug_compile_units().empty())
785 return NewF;
786
787 DIBuilder DBuilder(*M);
788 DITypeArray ParamTypes =
789 DBuilder.getOrCreateTypeArray({nullptr /*void return*/});
790 DISubroutineType *FuncType = DBuilder.createSubroutineType(ParamTypes);
791 DICompileUnit *CU = *M->debug_compile_units_begin();
792 DISubprogram *SP =
793 DBuilder.createFunction(CU, BPF_TRAP, BPF_TRAP, nullptr, 0, FuncType, 0,
794 DINode::FlagZero, DISubprogram::SPFlagZero);
795 NewF->setSubprogram(SP);
796 return NewF;
797}
798
799SDValue BPFTargetLowering::LowerTRAP(SDValue Op, SelectionDAG &DAG) const {
800 MachineFunction &MF = DAG.getMachineFunction();
801 TargetLowering::CallLoweringInfo CLI(DAG);
803 SDNode *N = Op.getNode();
804 SDLoc DL(N);
805
807 auto PtrVT = getPointerTy(MF.getDataLayout());
808 CLI.Callee = DAG.getTargetGlobalAddress(Fn, DL, PtrVT);
809 CLI.Chain = N->getOperand(0);
810 CLI.IsTailCall = false;
812 CLI.IsVarArg = false;
813 CLI.DL = std::move(DL);
814 CLI.NoMerge = false;
815 CLI.DoesNotReturn = true;
816 return LowerCall(CLI, InVals);
817}
818
819SDValue BPFTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
820 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
821 return getAddr(N, DAG);
822}
823
825 SelectionDAG &DAG, unsigned Flags) {
826 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
827 N->getOffset(), Flags);
828}
829
831 SelectionDAG &DAG, unsigned Flags) {
832 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
833}
834
835template <class NodeTy>
836SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
837 unsigned Flags) const {
838 SDLoc DL(N);
839
840 SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);
841
842 return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
843}
844
845SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
846 SelectionDAG &DAG) const {
847 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
848 if (N->getOffset() != 0)
849 report_fatal_error("invalid offset for global address: " +
850 Twine(N->getOffset()));
851
852 const GlobalValue *GVal = N->getGlobal();
853 SDLoc DL(Op);
854
855 // Wrap it in a TargetGlobalAddress
856 SDValue Addr = DAG.getTargetGlobalAddress(GVal, DL, MVT::i64);
857
858 // Emit pseudo instruction
859 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
860}
861
862SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
863 SelectionDAG &DAG) const {
864 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
865
866 return getAddr(N, DAG);
867}
868
869SDValue BPFTargetLowering::LowerBlockAddress(SDValue Op,
870 SelectionDAG &DAG) const {
871 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
872 SDLoc DL(Op);
873
874 // Wrap it in a TargetBlockAddress
875 SDValue Addr = DAG.getTargetBlockAddress(BA, MVT::i64);
876
877 // Emit pseudo instruction
878 return SDValue(DAG.getMachineNode(BPF::LDIMM64, DL, MVT::i64, Addr), 0);
879}
880
881unsigned
882BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
883 unsigned Reg, bool isSigned) const {
884 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
885 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
886 int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
887 MachineFunction *F = BB->getParent();
888 DebugLoc DL = MI.getDebugLoc();
889
890 MachineRegisterInfo &RegInfo = F->getRegInfo();
891
892 if (!isSigned) {
893 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
894 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
895 return PromotedReg0;
896 }
897 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
898 Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
899 Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
900 if (HasMovsx) {
901 BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg0).addReg(Reg);
902 } else {
903 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
904 BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
905 .addReg(PromotedReg0).addImm(32);
906 BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
907 .addReg(PromotedReg1).addImm(32);
908 }
909
910 return PromotedReg2;
911}
912
914BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
916 const {
917 MachineFunction *MF = MI.getParent()->getParent();
918 MachineRegisterInfo &MRI = MF->getRegInfo();
919 MachineInstrBuilder MIB(*MF, MI);
920 unsigned ScratchReg;
921
922 // This function does custom insertion during lowering BPFISD::MEMCPY which
923 // only has two register operands from memcpy semantics, the copy source
924 // address and the copy destination address.
925 //
926 // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
927 // a third scratch register to serve as the destination register of load and
928 // source register of store.
929 //
930 // The scratch register here is with the Define | Dead | EarlyClobber flags.
931 // The EarlyClobber flag has the semantic property that the operand it is
932 // attached to is clobbered before the rest of the inputs are read. Hence it
933 // must be unique among the operands to the instruction. The Define flag is
934 // needed to coerce the machine verifier that an Undef value isn't a problem
935 // as we anyway is loading memory into it. The Dead flag is needed as the
936 // value in scratch isn't supposed to be used by any other instruction.
937 ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
938 MIB.addReg(ScratchReg,
940
941 return BB;
942}
943
944MachineBasicBlock *BPFTargetLowering::EmitInstrWithCustomInserterLDimm64(
945 MachineInstr &MI, MachineBasicBlock *BB) const {
946 MachineFunction *MF = BB->getParent();
947 const BPFInstrInfo *TII = MF->getSubtarget<BPFSubtarget>().getInstrInfo();
948 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
949 MachineRegisterInfo &RegInfo = MF->getRegInfo();
950 DebugLoc DL = MI.getDebugLoc();
951
952 // Build address taken map for Global Varaibles and BlockAddresses
953 DenseMap<const BasicBlock *, MachineBasicBlock *> AddressTakenBBs;
954 for (MachineBasicBlock &MBB : *MF) {
955 if (const BasicBlock *BB = MBB.getBasicBlock())
956 if (BB->hasAddressTaken())
957 AddressTakenBBs[BB] = &MBB;
958 }
959
960 MachineOperand &MO = MI.getOperand(1);
961 assert(MO.isBlockAddress() || MO.isGlobal());
962
963 Register ResultReg = MI.getOperand(0).getReg();
964 Register TmpReg = RegInfo.createVirtualRegister(RC);
965
966 std::vector<MachineBasicBlock *> Targets;
967 unsigned JTI;
968
969 if (MO.isBlockAddress()) {
970 auto *BA = MO.getBlockAddress();
971 MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
972 assert(TgtMBB);
973
974 Targets.push_back(TgtMBB);
975 JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
976 ->createJumpTableIndex(Targets);
977
978 BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), TmpReg)
979 .addJumpTableIndex(JTI);
980 BuildMI(*BB, MI, DL, TII->get(BPF::LDD), ResultReg)
981 .addReg(TmpReg)
982 .addImm(0);
983 MI.eraseFromParent();
984 return BB;
985 }
986
987 // Helper: emit LD_imm64 with operand GlobalAddress or JumpTable
988 auto emitLDImm64 = [&](const GlobalValue *GV = nullptr, unsigned JTI = -1) {
989 auto MIB = BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), ResultReg);
990 if (GV)
991 MIB.addGlobalAddress(GV);
992 else
993 MIB.addJumpTableIndex(JTI);
994 MI.eraseFromParent();
995 return BB;
996 };
997
998 // Must be a global at this point
999 const GlobalValue *GVal = MO.getGlobal();
1000 const auto *GV = dyn_cast<GlobalVariable>(GVal);
1001
1002 if (!GV || GV->getLinkage() != GlobalValue::PrivateLinkage ||
1003 !GV->isConstant() || !GV->hasInitializer())
1004 return emitLDImm64(GVal);
1005
1006 const auto *CA = dyn_cast<ConstantArray>(GV->getInitializer());
1007 if (!CA)
1008 return emitLDImm64(GVal);
1009
1010 for (const Use &Op : CA->operands()) {
1011 if (!isa<BlockAddress>(Op))
1012 return emitLDImm64(GVal);
1013 auto *BA = cast<BlockAddress>(Op);
1014 MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];
1015 assert(TgtMBB);
1016 Targets.push_back(TgtMBB);
1017 }
1018
1019 JTI = MF->getOrCreateJumpTableInfo(getJumpTableEncoding())
1020 ->createJumpTableIndex(Targets);
1021 return emitLDImm64(nullptr, JTI);
1022}
1023
1026 MachineBasicBlock *BB) const {
1028 DebugLoc DL = MI.getDebugLoc();
1029 unsigned Opc = MI.getOpcode();
1030 bool isSelectRROp = (Opc == BPF::Select ||
1031 Opc == BPF::Select_64_32 ||
1032 Opc == BPF::Select_32 ||
1033 Opc == BPF::Select_32_64);
1034
1035 bool isMemcpyOp = Opc == BPF::MEMCPY;
1036 bool isLDimm64Op = Opc == BPF::LDIMM64;
1037
1038#ifndef NDEBUG
1039 bool isSelectRIOp = (Opc == BPF::Select_Ri ||
1040 Opc == BPF::Select_Ri_64_32 ||
1041 Opc == BPF::Select_Ri_32 ||
1042 Opc == BPF::Select_Ri_32_64);
1043
1044 if (!(isSelectRROp || isSelectRIOp || isMemcpyOp || isLDimm64Op))
1045 report_fatal_error("unhandled instruction type: " + Twine(Opc));
1046#endif
1047
1048 if (isMemcpyOp)
1049 return EmitInstrWithCustomInserterMemcpy(MI, BB);
1050
1051 if (isLDimm64Op)
1052 return EmitInstrWithCustomInserterLDimm64(MI, BB);
1053
1054 bool is32BitCmp = (Opc == BPF::Select_32 ||
1055 Opc == BPF::Select_32_64 ||
1056 Opc == BPF::Select_Ri_32 ||
1057 Opc == BPF::Select_Ri_32_64);
1058
1059 // To "insert" a SELECT instruction, we actually have to insert the diamond
1060 // control-flow pattern. The incoming instruction knows the destination vreg
1061 // to set, the condition code register to branch on, the true/false values to
1062 // select between, and a branch opcode to use.
1063 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1065
1066 // ThisMBB:
1067 // ...
1068 // TrueVal = ...
1069 // jmp_XX r1, r2 goto Copy1MBB
1070 // fallthrough --> Copy0MBB
1071 MachineBasicBlock *ThisMBB = BB;
1072 MachineFunction *F = BB->getParent();
1073 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
1074 MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
1075
1076 F->insert(I, Copy0MBB);
1077 F->insert(I, Copy1MBB);
1078 // Update machine-CFG edges by transferring all successors of the current
1079 // block to the new block which will contain the Phi node for the select.
1080 Copy1MBB->splice(Copy1MBB->begin(), BB,
1081 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1082 Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
1083 // Next, add the true and fallthrough blocks as its successors.
1084 BB->addSuccessor(Copy0MBB);
1085 BB->addSuccessor(Copy1MBB);
1086
1087 // Insert Branch if Flag
1088 int CC = MI.getOperand(3).getImm();
1089 int NewCC;
1090 switch (CC) {
1091#define SET_NEWCC(X, Y) \
1092 case ISD::X: \
1093 if (is32BitCmp && HasJmp32) \
1094 NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
1095 else \
1096 NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
1097 break
1098 SET_NEWCC(SETGT, JSGT);
1099 SET_NEWCC(SETUGT, JUGT);
1100 SET_NEWCC(SETGE, JSGE);
1101 SET_NEWCC(SETUGE, JUGE);
1102 SET_NEWCC(SETEQ, JEQ);
1103 SET_NEWCC(SETNE, JNE);
1104 SET_NEWCC(SETLT, JSLT);
1105 SET_NEWCC(SETULT, JULT);
1106 SET_NEWCC(SETLE, JSLE);
1107 SET_NEWCC(SETULE, JULE);
1108 default:
1109 report_fatal_error("unimplemented select CondCode " + Twine(CC));
1110 }
1111
1112 Register LHS = MI.getOperand(1).getReg();
1113 bool isSignedCmp = (CC == ISD::SETGT ||
1114 CC == ISD::SETGE ||
1115 CC == ISD::SETLT ||
1116 CC == ISD::SETLE);
1117
1118 // eBPF at the moment only has 64-bit comparison. Any 32-bit comparison need
1119 // to be promoted, however if the 32-bit comparison operands are destination
1120 // registers then they are implicitly zero-extended already, there is no
1121 // need of explicit zero-extend sequence for them.
1122 //
1123 // We simply do extension for all situations in this method, but we will
1124 // try to remove those unnecessary in BPFMIPeephole pass.
1125 if (is32BitCmp && !HasJmp32)
1126 LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);
1127
1128 if (isSelectRROp) {
1129 Register RHS = MI.getOperand(2).getReg();
1130
1131 if (is32BitCmp && !HasJmp32)
1132 RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);
1133
1134 BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
1135 } else {
1136 int64_t imm32 = MI.getOperand(2).getImm();
1137 // Check before we build J*_ri instruction.
1138 if (!isInt<32>(imm32))
1139 report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
1140 BuildMI(BB, DL, TII.get(NewCC))
1141 .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
1142 }
1143
1144 // Copy0MBB:
1145 // %FalseValue = ...
1146 // # fallthrough to Copy1MBB
1147 BB = Copy0MBB;
1148
1149 // Update machine-CFG edges
1150 BB->addSuccessor(Copy1MBB);
1151
1152 // Copy1MBB:
1153 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
1154 // ...
1155 BB = Copy1MBB;
1156 BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
1157 .addReg(MI.getOperand(5).getReg())
1158 .addMBB(Copy0MBB)
1159 .addReg(MI.getOperand(4).getReg())
1160 .addMBB(ThisMBB);
1161
1162 MI.eraseFromParent(); // The pseudo instruction is gone now.
1163 return BB;
1164}
1165
1167 EVT VT) const {
1168 return getHasAlu32() ? MVT::i32 : MVT::i64;
1169}
1170
1172 EVT VT) const {
1173 return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
1174}
1175
1176bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1177 const AddrMode &AM, Type *Ty,
1178 unsigned AS,
1179 Instruction *I) const {
1180 // No global is ever allowed as a base.
1181 if (AM.BaseGV)
1182 return false;
1183
1184 switch (AM.Scale) {
1185 case 0: // "r+i" or just "i", depending on HasBaseReg.
1186 break;
1187 case 1:
1188 if (!AM.HasBaseReg) // allow "r+i".
1189 break;
1190 return false; // disallow "r+r" or "r+r+i".
1191 default:
1192 return false;
1193 }
1194
1195 return true;
1196}
1197
1198bool BPFTargetLowering::CanLowerReturn(
1199 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
1201 const Type *RetTy) const {
1203 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
1204 return CCInfo.CheckReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
1205}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static uint32_t * regMaskFromTemplate(const TargetRegisterInfo *TRI, MachineFunction &MF, const uint32_t *BaseRegMask)
static Function * createBPFUnreachable(Module *M)
static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static cl::opt< bool > BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order", cl::Hidden, cl::init(false), cl::desc("Expand memcpy into load/store pairs in order"))
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
static cl::opt< unsigned > BPFMinimumJumpTableEntries("bpf-min-jump-table-entries", cl::init(13), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table on BPF"))
static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask, MCRegister Reg)
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)
#define SET_NEWCC(X, Y)
#define BPF_TRAP
Definition BPF.h:25
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
Value * RHS
Value * LHS
unsigned getCommonMaxStoresPerMemFunc() const
bool hasSdivSmod() const
bool getAllowsMisalignedMemAccess() const
bool getHasJmpExt() const
const BPFSelectionDAGInfo * getSelectionDAGInfo() const override
bool hasLdsx() const
bool hasGotox() const
bool hasMovsx() const
bool getHasJmp32() const
const BPFRegisterInfo * getRegisterInfo() const override
bool getHasAlu32() const
BPFTargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *) const override
Determine if the target supports unaligned memory accesses.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
BPFTargetLowering(const TargetMachine &TM, const BPFSubtarget &STI)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
BasicBlock * getBasicBlock() const
Definition Constants.h:1100
CCState - This class holds information needed while lowering arguments and return values.
Register getLocReg() const
LocInfo getLocInfo() const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
LLVM_ABI DISubroutineType * createSubroutineType(DITypeArray ParameterTypes, DINode::DIFlags Flags=DINode::FlagZero, unsigned CC=0)
Create subroutine type.
LLVM_ABI DISubprogram * createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine, DINode::DIFlags Flags=DINode::FlagZero, DISubprogram::DISPFlags SPFlags=DISubprogram::SPFlagZero, DITemplateParameterArray TParams=nullptr, DISubprogram *Decl=nullptr, DITypeArray ThrownTypes=nullptr, DINodeArray Annotations=nullptr, StringRef TargetFuncName="", bool UseKeyInstructions=false)
Create a new descriptor for the specified subprogram.
LLVM_ABI DITypeArray getOrCreateTypeArray(ArrayRef< Metadata * > Elements)
Get a DITypeArray, create one if required.
Subprogram description. Uses SubclassData1.
Type array for a subprogram.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void setSubprogram(DISubprogram *SP)
Set the attached subprogram.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:168
void setCallingConv(CallingConv::ID CC)
Definition Function.h:276
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:284
LinkageTypes getLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool hasAddressTaken() const
Test whether this block is used as something other than the target of a terminator,...
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
@ EK_BlockAddress
EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.
Flags
Flags values. These may be or'd together.
const GlobalValue * getGlobal() const
const BlockAddress * getBlockAddress() const
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
bool isBlockAddress() const
isBlockAddress - Tests if this is a MO_BlockAddress operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:20
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const SDValue & getOperand(unsigned Num) const
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:286
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
self_iterator getIterator()
Definition ilist_node.h:123
A raw_ostream that writes to an std::string.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:788
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:779
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:853
@ GlobalAddress
Definition ISDOpcodes.h:88
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition ISDOpcodes.h:787
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:796
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:811
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:888
@ TRAP
TRAP - Trapping instruction.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:833
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Definition ISDOpcodes.h:62
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
@ Dead
Unused definition.
@ EarlyClobber
Register definition happens before uses.
@ Define
Register definition.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition STLExtras.h:1152
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
void print(raw_ostream &OS) const
Implement operator<<.
Definition ValueTypes.h:512
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs