LLVM 20.0.0git
BPFISelLowering.cpp
Go to the documentation of this file.
1//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that BPF uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "BPFISelLowering.h"
15#include "BPF.h"
16#include "BPFSubtarget.h"
26#include "llvm/Support/Debug.h"
30
31using namespace llvm;
32
33#define DEBUG_TYPE "bpf-lower"
34
35static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
36 cl::Hidden, cl::init(false),
37 cl::desc("Expand memcpy into load/store pairs in order"));
38
// Emit a non-fatal "unsupported feature" diagnostic for the current function:
// Msg, optionally prefixed with a printout of the offending SDValue, is routed
// through the context's diagnostic handler. Compilation continues, so callers
// typically also return a placeholder value.
// NOTE(review): this rendering elides the raw_string_ostream declaration
// (line 43) and the diagnose(DiagnosticInfoUnsupported(...)) call header
// (lines 47-48) — surviving code kept verbatim; confirm against upstream.
39static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
40 SDValue Val = {}) {
41 std::string Str;
42 if (Val) {
44 Val->print(OS);
45 OS << ' ';
46 }
49 MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
50}
51
// BPFTargetLowering constructor: registers the legal register classes
// (i64 always, i32 only with alu32), configures operation legality from the
// subtarget features (alu32, jmp32, sdiv/smod, movsx, ldsx), tunes memcpy
// expansion limits, and caches feature flags for later queries.
// NOTE(review): many setOperationAction/configuration lines are elided in
// this rendering — surviving code kept verbatim.
53 const BPFSubtarget &STI)
54 : TargetLowering(TM) {
55
56 // Set up the register classes.
57 addRegisterClass(MVT::i64, &BPF::GPRRegClass);
58 if (STI.getHasAlu32())
59 addRegisterClass(MVT::i32, &BPF::GPR32RegClass);
60
61 // Compute derived properties from the register classes
63
65
70
72
76
77 // Set unsupported atomic operations as Custom so
78 // we can emit better error messages than fatal error
79 // from selectiondag.
80 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
81 if (VT == MVT::i32) {
82 if (STI.getHasAlu32())
83 continue;
84 } else {
86 }
87
93 }
94
95 for (auto VT : { MVT::i32, MVT::i64 }) {
96 if (VT == MVT::i32 && !STI.getHasAlu32())
97 continue;
98
101 if (!STI.hasSdivSmod()) {
104 }
119
123 }
124
125 if (STI.getHasAlu32()) {
128 STI.getHasJmp32() ? Custom : Promote);
129 }
130
132 if (!STI.hasMovsx()) {
136 }
137
138 // Extended load operations for i1 types must be promoted
139 for (MVT VT : MVT::integer_valuetypes()) {
143
// Without ldsx support, sign-extending loads must be expanded.
144 if (!STI.hasLdsx()) {
146 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
147 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
148 }
149 }
150
153
154 // Function alignments
157
159 // LLVM generic code will try to expand memcpy into load/store pairs at this
160 // stage which is before quite a few IR optimization passes, therefore the
161 // loads and stores could potentially be moved apart from each other which
162 // will cause trouble to memcpy pattern matcher inside kernel eBPF JIT
163 // compilers.
164 //
165 // When -bpf-expand-memcpy-in-order specified, we want to defer the expand
166 // of memcpy to later stage in IR optimization pipeline so those load/store
167 // pairs won't be touched and could be kept in order. Hence, we set
168 // MaxStoresPerMem* to zero to disable the generic getMemcpyLoadsAndStores
169 // code path, and ask LLVM to use target expander EmitTargetCodeForMemcpy.
174 } else {
175 // inline memcpy() for kernel to see explicit copy
176 unsigned CommonMaxStores =
178
183 }
184
185 // CPU/Feature control
186 HasAlu32 = STI.getHasAlu32();
187 HasJmp32 = STI.getHasJmp32();
188 HasJmpExt = STI.getHasJmpExt();
189 HasMovsx = STI.hasMovsx();
190}
191
// isOffsetFoldingLegal (signature line 192 elided in this rendering):
// constant offsets are never folded into BPF global addresses.
193 return false;
194}
195
196bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
197 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
198 return false;
199 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
200 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
201 return NumBits1 > NumBits2;
202}
203
204bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
205 if (!VT1.isInteger() || !VT2.isInteger())
206 return false;
207 unsigned NumBits1 = VT1.getSizeInBits();
208 unsigned NumBits2 = VT2.getSizeInBits();
209 return NumBits1 > NumBits2;
210}
211
212bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
213 if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
214 return false;
215 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
216 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
217 return NumBits1 == 32 && NumBits2 == 64;
218}
219
220bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
221 if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
222 return false;
223 unsigned NumBits1 = VT1.getSizeInBits();
224 unsigned NumBits2 = VT2.getSizeInBits();
225 return NumBits1 == 32 && NumBits2 == 64;
226}
227
228bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
229 EVT VT1 = Val.getValueType();
230 if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
231 MVT MT1 = VT1.getSimpleVT().SimpleTy;
232 MVT MT2 = VT2.getSimpleVT().SimpleTy;
233 if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
234 (MT2 == MVT::i32 || MT2 == MVT::i64))
235 return true;
236 }
237 return TargetLoweringBase::isZExtFree(Val, VT2);
238}
239
// getConstraintType (signature lines 240-241 elided in this rendering):
// classify the single-letter 'w' inline-asm constraint as a register-class
// constraint; everything else is delegated to the generic TargetLowering.
242 if (Constraint.size() == 1) {
243 switch (Constraint[0]) {
244 default:
245 break;
246 case 'w':
247 return C_RegisterClass;
248 }
249 }
250
251 return TargetLowering::getConstraintType(Constraint);
252}
253
// Map single-letter inline-asm register constraints to register classes:
// 'r' -> 64-bit GPR; 'w' -> 32-bit GPR32, but only when ALU32 is enabled.
// NOTE(review): the line naming the function (255) and the fallback return
// to TargetLowering::getRegForInlineAsmConstraint (272) are elided in this
// rendering — code kept verbatim.
254std::pair<unsigned, const TargetRegisterClass *>
256 StringRef Constraint,
257 MVT VT) const {
258 if (Constraint.size() == 1) {
259 // GCC Constraint Letters
260 switch (Constraint[0]) {
261 case 'r': // GENERAL_REGS
262 return std::make_pair(0U, &BPF::GPRRegClass);
263 case 'w':
264 if (HasAlu32)
265 return std::make_pair(0U, &BPF::GPR32RegClass);
266 break;
267 default:
268 break;
269 }
270 }
271
273}
274
// Custom result legalization hook. The nodes routed here are unsupported
// narrow atomic operations; pick a user-friendly message and emit it via
// fail() (the generic legalizer still produces a fatal error afterwards).
// NOTE(review): the parameter list line (276) and several atomic case
// labels (282-285, 287) are elided in this rendering — code kept verbatim.
275void BPFTargetLowering::ReplaceNodeResults(
277 const char *Msg;
278 uint32_t Opcode = N->getOpcode();
279 switch (Opcode) {
280 default:
281 report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
286 case ISD::ATOMIC_SWAP:
// With ALU32 a 32-bit form exists for most atomics, so suggest 32/64-bit;
// otherwise only the 64-bit form is available.
288 if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
289 Msg = "unsupported atomic operation, please use 32/64 bit version";
290 else
291 Msg = "unsupported atomic operation, please use 64 bit version";
292 break;
293 }
294
295 SDLoc DL(N);
296 // We'll still produce a fatal error downstream, but this diagnostic is more
297 // user-friendly.
298 fail(DL, DAG, Msg);
299}
300
// LowerOperation (signature line 301 elided in this rendering): dispatch
// each operation registered as Custom to its dedicated lowering helper.
// NOTE(review): the case labels for GlobalAddress (307), ConstantPool
// (309) and DYNAMIC_STACKALLOC (316) are elided — code kept verbatim.
302 switch (Op.getOpcode()) {
303 default:
304 report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
305 case ISD::BR_CC:
306 return LowerBR_CC(Op, DAG);
308 return LowerGlobalAddress(Op, DAG);
310 return LowerConstantPool(Op, DAG);
311 case ISD::SELECT_CC:
312 return LowerSELECT_CC(Op, DAG);
313 case ISD::SDIV:
314 case ISD::SREM:
315 return LowerSDIVSREM(Op, DAG);
317 return LowerDYNAMIC_STACKALLOC(Op, DAG);
318 }
319}
320
321// Calling Convention Implementation
322#include "BPFGenCallingConv.inc"
323
// Lower incoming formal arguments: only the C/Fastcc conventions are
// accepted, only i32/i64 register arguments are materialized; stack
// arguments, varargs and sret are diagnosed via fail() with a zero
// placeholder pushed for unlowerable locations.
// NOTE(review): several lines (332, 336-337, 340, 356) are elided in this
// rendering (e.g. the MachineFunction/RegInfo and ArgLocs declarations) —
// code kept verbatim.
324SDValue BPFTargetLowering::LowerFormalArguments(
325 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
326 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
327 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
328 switch (CallConv) {
329 default:
330 report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
331 case CallingConv::C:
333 break;
334 }
335
338
339 // Assign locations to all of the incoming arguments.
341 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
342 CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);
343
344 bool HasMemArgs = false;
345 for (size_t I = 0; I < ArgLocs.size(); ++I) {
346 auto &VA = ArgLocs[I];
347
348 if (VA.isRegLoc()) {
349 // Arguments passed in registers
350 EVT RegVT = VA.getLocVT();
351 MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
352 switch (SimpleTy) {
353 default: {
354 std::string Str;
355 {
357 RegVT.print(OS);
358 }
359 report_fatal_error("unhandled argument type: " + Twine(Str));
360 }
361 case MVT::i32:
362 case MVT::i64:
363 Register VReg = RegInfo.createVirtualRegister(
364 SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
365 RegInfo.addLiveIn(VA.getLocReg(), VReg);
366 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);
367
368 // If this is a value that has been promoted to a wider type, insert an
369 // assert[sz]ext to capture this, then truncate to the right size.
370 if (VA.getLocInfo() == CCValAssign::SExt)
371 ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
372 DAG.getValueType(VA.getValVT()));
373 else if (VA.getLocInfo() == CCValAssign::ZExt)
374 ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
375 DAG.getValueType(VA.getValVT()));
376
377 if (VA.getLocInfo() != CCValAssign::Full)
378 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
379
380 InVals.push_back(ArgValue);
381
382 break;
383 }
384 } else {
385 if (VA.isMemLoc())
386 HasMemArgs = true;
387 else
388 report_fatal_error("unhandled argument location");
// Unsupported location: keep InVals aligned with Ins using a zero stub.
389 InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
390 }
391 }
392 if (HasMemArgs)
393 fail(DL, DAG, "stack arguments are not supported");
394 if (IsVarArg)
395 fail(DL, DAG, "variadic functions are not supported");
396 if (MF.getFunction().hasStructRetAttr())
397 fail(DL, DAG, "aggregate returns are not supported");
398
399 return Chain;
400}
401
// Upper bound on register-passed call arguments; LowerCall diagnoses calls
// that exceed it ("too many arguments").
402const size_t BPFTargetLowering::MaxArgs = 5;
403
404static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask,
405 MCRegister Reg) {
406 for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
407 RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
408}
409
// Allocate a register mask owned by MF and initialize it as a copy of
// BaseRegMask, so callers can then clear individual bits per call site.
// NOTE(review): the first signature line (410) is elided in this rendering
// — code kept verbatim.
411 MachineFunction &MF,
412 const uint32_t *BaseRegMask) {
413 uint32_t *RegMask = MF.allocateRegMask();
414 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
415 memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);
416 return RegMask;
417}
418
// Lower an outgoing call. Arguments are passed exclusively in registers (at
// most MaxArgs); tail calls are disabled; byval, stack arguments, calls to
// external builtins and excess arguments are diagnosed. For "bpf_fastcall"
// callees a restricted preserved-register mask is built from the
// PreserveAll template with the argument/result registers cleared.
// NOTE(review): several lines are elided in this rendering (e.g. 430, 438,
// 444 declarations, 476/478/481/484 CCValAssign case labels, 521 the Ops
// SmallVector declaration, 532 the TRI declaration) — code kept verbatim.
419SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
420 SmallVectorImpl<SDValue> &InVals) const {
421 SelectionDAG &DAG = CLI.DAG;
422 auto &Outs = CLI.Outs;
423 auto &OutVals = CLI.OutVals;
424 auto &Ins = CLI.Ins;
425 SDValue Chain = CLI.Chain;
426 SDValue Callee = CLI.Callee;
427 bool &IsTailCall = CLI.IsTailCall;
428 CallingConv::ID CallConv = CLI.CallConv;
429 bool IsVarArg = CLI.IsVarArg;
431
432 // BPF target does not support tail call optimization.
433 IsTailCall = false;
434
435 switch (CallConv) {
436 default:
437 report_fatal_error("unsupported calling convention: " + Twine(CallConv));
439 case CallingConv::C:
440 break;
441 }
442
443 // Analyze operands of the call, assigning locations to each operand.
445 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
446
447 CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);
448
449 unsigned NumBytes = CCInfo.getStackSize();
450
451 if (Outs.size() > MaxArgs)
452 fail(CLI.DL, DAG, "too many arguments", Callee);
453
454 for (auto &Arg : Outs) {
455 ISD::ArgFlagsTy Flags = Arg.Flags;
456 if (!Flags.isByVal())
457 continue;
458 fail(CLI.DL, DAG, "pass by value not supported", Callee);
459 break;
460 }
461
462 auto PtrVT = getPointerTy(MF.getDataLayout());
463 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
464
465 SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;
466
467 // Walk arg assignments
468 for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {
469 CCValAssign &VA = ArgLocs[i];
470 SDValue &Arg = OutVals[i];
471
472 // Promote the value if needed.
473 switch (VA.getLocInfo()) {
474 default:
475 report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
477 break;
479 Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
480 break;
482 Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
483 break;
485 Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
486 break;
487 }
488
489 // Push arguments into RegsToPass vector
490 if (VA.isRegLoc())
491 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
492 else
493 report_fatal_error("stack arguments are not supported");
494 }
495
496 SDValue InGlue;
497
498 // Build a sequence of copy-to-reg nodes chained together with token chain and
499 // flag operands which copy the outgoing args into registers. The InGlue is
500 // necessary since all emitted instructions must be stuck together.
501 for (auto &Reg : RegsToPass) {
502 Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
503 InGlue = Chain.getValue(1);
504 }
505
506 // If the callee is a GlobalAddress node (quite common, every direct call is)
507 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
508 // Likewise ExternalSymbol -> TargetExternalSymbol.
509 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
510 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
511 G->getOffset(), 0);
512 } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
513 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
514 fail(CLI.DL, DAG,
515 Twine("A call to built-in function '" + StringRef(E->getSymbol()) +
516 "' is not supported."));
517 }
518
519 // Returns a chain & a flag for retval copy to use.
520 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
522 Ops.push_back(Chain);
523 Ops.push_back(Callee);
524
525 // Add argument registers to the end of the list so that they are
526 // known live into the call.
527 for (auto &Reg : RegsToPass)
528 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
529
530 bool HasFastCall =
531 (CLI.CB && isa<CallInst>(CLI.CB) && CLI.CB->hasFnAttr("bpf_fastcall"));
533 if (HasFastCall) {
534 uint32_t *RegMask = regMaskFromTemplate(
535 TRI, MF, TRI->getCallPreservedMask(MF, CallingConv::PreserveAll));
536 for (auto const &RegPair : RegsToPass)
537 resetRegMaskBit(TRI, RegMask, RegPair.first);
538 if (!CLI.CB->getType()->isVoidTy())
539 resetRegMaskBit(TRI, RegMask, BPF::R0);
540 Ops.push_back(DAG.getRegisterMask(RegMask));
541 } else {
542 Ops.push_back(
543 DAG.getRegisterMask(TRI->getCallPreservedMask(MF, CLI.CallConv)));
544 }
545
546 if (InGlue.getNode())
547 Ops.push_back(InGlue);
548
549 Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
550 InGlue = Chain.getValue(1);
551
552 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
553
554 // Create the CALLSEQ_END node.
555 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
556 InGlue = Chain.getValue(1);
557
558 // Handle result values, copying them out of physregs into vregs that we
559 // return.
560 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
561 InVals);
562}
563
// Lower a function return: copy each return value into its assigned
// physical register (stack returns are fatal) and emit a BPFISD::RET_GLUE
// node. Aggregate (sret) returns are diagnosed and lowered to a bare ret.
// NOTE(review): lines 564, 567, 573-574 and 579 are elided in this
// rendering (e.g. the MachineFunction/RVLocs declarations and the sret
// condition) — code kept verbatim.
565BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
566 bool IsVarArg,
568 const SmallVectorImpl<SDValue> &OutVals,
569 const SDLoc &DL, SelectionDAG &DAG) const {
570 unsigned Opc = BPFISD::RET_GLUE;
571
572 // CCValAssign - represent the assignment of the return value to a location
575
576 // CCState - Info about the registers and stack slot.
577 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
578
580 fail(DL, DAG, "aggregate returns are not supported");
581 return DAG.getNode(Opc, DL, MVT::Other, Chain);
582 }
583
584 // Analyze return values.
585 CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
586
587 SDValue Glue;
588 SmallVector<SDValue, 4> RetOps(1, Chain);
589
590 // Copy the result values into the output registers.
591 for (size_t i = 0; i != RVLocs.size(); ++i) {
592 CCValAssign &VA = RVLocs[i];
593 if (!VA.isRegLoc())
594 report_fatal_error("stack return values are not supported");
595
596 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);
597
598 // Guarantee that all emitted copies are stuck together,
599 // avoiding something bad.
600 Glue = Chain.getValue(1);
601 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
602 }
603
604 RetOps[0] = Chain; // Update chain.
605
606 // Add the glue if we have it.
607 if (Glue.getNode())
608 RetOps.push_back(Glue);
609
610 return DAG.getNode(Opc, DL, MVT::Other, RetOps);
611}
612
// Copy call results out of their physical registers into the vregs the
// caller will use. Multi-value returns are diagnosed and stubbed out with
// zeros; otherwise each RVLoc is read through glued CopyFromReg nodes.
// NOTE(review): lines 618 and 620 (MachineFunction and RVLocs
// declarations) are elided in this rendering — code kept verbatim.
613SDValue BPFTargetLowering::LowerCallResult(
614 SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
615 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
616 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
617
619 // Assign locations to each value returned by this call.
621 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
622
623 if (Ins.size() > 1) {
624 fail(DL, DAG, "only small returns supported");
625 for (auto &In : Ins)
626 InVals.push_back(DAG.getConstant(0, DL, In.VT));
// Still consume the glue so the DAG remains well-formed.
627 return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InGlue).getValue(1);
628 }
629
630 CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);
631
632 // Copy all of the result registers out of their specified physreg.
633 for (auto &Val : RVLocs) {
634 Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
635 Val.getValVT(), InGlue).getValue(1);
636 InGlue = Chain.getValue(2);
637 InVals.push_back(Chain.getValue(0));
638 }
639
640 return Chain;
641}
642
// Rewrite "less-than"-family comparisons by swapping the operands, for use
// when the subtarget lacks the extended jump instructions.
// NOTE(review): line 651 is elided in this rendering — presumably it
// replaces CC with its swapped-operand counterpart
// (ISD::getSetCCSwappedOperands) before the operand swap; confirm upstream.
643static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
644 switch (CC) {
645 default:
646 break;
647 case ISD::SETULT:
648 case ISD::SETULE:
649 case ISD::SETLT:
650 case ISD::SETLE:
652 std::swap(LHS, RHS);
653 break;
654 }
655}
656
657SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
658 SDLoc DL(Op);
659 fail(DL, DAG,
660 "unsupported signed division, please convert to unsigned div/mod.");
661 return DAG.getUNDEF(Op->getValueType(0));
662}
663
664SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
665 SelectionDAG &DAG) const {
666 SDLoc DL(Op);
667 fail(DL, DAG, "unsupported dynamic stack allocation");
668 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
669 return DAG.getMergeValues(Ops, SDLoc());
670}
671
672SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
673 SDValue Chain = Op.getOperand(0);
674 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
675 SDValue LHS = Op.getOperand(2);
676 SDValue RHS = Op.getOperand(3);
677 SDValue Dest = Op.getOperand(4);
678 SDLoc DL(Op);
679
680 if (!getHasJmpExt())
681 NegateCC(LHS, RHS, CC);
682
683 return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
684 DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
685}
686
687SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
688 SDValue LHS = Op.getOperand(0);
689 SDValue RHS = Op.getOperand(1);
690 SDValue TrueV = Op.getOperand(2);
691 SDValue FalseV = Op.getOperand(3);
692 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
693 SDLoc DL(Op);
694
695 if (!getHasJmpExt())
696 NegateCC(LHS, RHS, CC);
697
698 SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
699 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
700
701 return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);
702}
703
// Map BPF-specific DAG node opcodes to printable names for debug dumps;
// returns nullptr for unknown opcodes.
// NOTE(review): the FIRST_NUMBER case label (706) and the SELECT_CC case
// label (712) are elided in this rendering — code kept verbatim.
704const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
705 switch ((BPFISD::NodeType)Opcode) {
707 break;
708 case BPFISD::RET_GLUE:
709 return "BPFISD::RET_GLUE";
710 case BPFISD::CALL:
711 return "BPFISD::CALL";
713 return "BPFISD::SELECT_CC";
714 case BPFISD::BR_CC:
715 return "BPFISD::BR_CC";
716 case BPFISD::Wrapper:
717 return "BPFISD::Wrapper";
718 case BPFISD::MEMCPY:
719 return "BPFISD::MEMCPY";
720 }
721 return nullptr;
722}
723
// getTargetNode overload for GlobalAddressSDNode (first signature line,
// 724, elided in this rendering): build a TargetGlobalAddress with the
// given flags and zero offset.
725 SelectionDAG &DAG, unsigned Flags) {
726 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
727}
728
// getTargetNode overload for ConstantPoolSDNode (first signature line,
// 729, elided in this rendering): build a TargetConstantPool preserving
// the node's alignment and offset.
730 SelectionDAG &DAG, unsigned Flags) {
731 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
732 N->getOffset(), Flags);
733}
734
735template <class NodeTy>
736SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
737 unsigned Flags) const {
738 SDLoc DL(N);
739
740 SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);
741
742 return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
743}
744
745SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
746 SelectionDAG &DAG) const {
747 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
748 if (N->getOffset() != 0)
749 report_fatal_error("invalid offset for global address: " +
750 Twine(N->getOffset()));
751 return getAddr(N, DAG);
752}
753
754SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
755 SelectionDAG &DAG) const {
756 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
757
758 return getAddr(N, DAG);
759}
760
// Promote a 32-bit value in Reg to a 64-bit virtual register. The unsigned
// path uses a single MOV_32_64; the signed path uses MOVSX when available,
// otherwise a MOV_32_64 followed by a shift-left/shift-right-arithmetic
// pair by 32 bits. Returns the promoted virtual register.
// NOTE(review): line 764 (the TargetInstrInfo TII declaration) is elided
// in this rendering — code kept verbatim.
// NOTE(review): in the HasMovsx branch the MOVSX result is written to
// PromotedReg0, yet the function unconditionally returns PromotedReg2,
// which that branch never defines — looks like the MOVSX result should be
// returned instead; verify against upstream.
761unsigned
762BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
763 unsigned Reg, bool isSigned) const {
765 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
766 int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
767 MachineFunction *F = BB->getParent();
768 DebugLoc DL = MI.getDebugLoc();
769
770 MachineRegisterInfo &RegInfo = F->getRegInfo();
771
772 if (!isSigned) {
773 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
774 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
775 return PromotedReg0;
776 }
777 Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
778 Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
779 Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
780 if (HasMovsx) {
781 BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg0).addReg(Reg);
782 } else {
783 BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
784 BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
785 .addReg(PromotedReg0).addImm(32);
786 BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
787 .addReg(PromotedReg1).addImm(32);
788 }
789
790 return PromotedReg2;
791}
792
// Custom inserter for BPFISD::MEMCPY: append a scratch virtual register
// operand (destination of the loads / source of the stores) so the later
// expansion into load/store pairs has a register to work with.
// NOTE(review): lines 793, 795, 798 (return type, BB parameter, MRI
// declaration) and 819 (the RegState flags argument) are elided in this
// rendering — code kept verbatim.
794BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
796 const {
797 MachineFunction *MF = MI.getParent()->getParent();
799 MachineInstrBuilder MIB(*MF, MI);
800 unsigned ScratchReg;
801
802 // This function does custom insertion during lowering BPFISD::MEMCPY which
803 // only has two register operands from memcpy semantics, the copy source
804 // address and the copy destination address.
805 //
806 // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
807 // a third scratch register to serve as the destination register of load and
808 // source register of store.
809 //
810 // The scratch register here is with the Define | Dead | EarlyClobber flags.
811 // The EarlyClobber flag has the semantic property that the operand it is
812 // attached to is clobbered before the rest of the inputs are read. Hence it
813 // must be unique among the operands to the instruction. The Define flag is
814 // needed to coerce the machine verifier that an Undef value isn't a problem
815 // as we anyway is loading memory into it. The Dead flag is needed as the
816 // value in scratch isn't supposed to be used by any other instruction.
817 ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
818 MIB.addReg(ScratchReg,
820
821 return BB;
822}
823
// Custom inserter for the Select* pseudos and MEMCPY. MEMCPY is delegated
// to EmitInstrWithCustomInserterMemcpy; selects are expanded into the
// classic diamond CFG (compare-and-branch, false block, join block with a
// PHI). 32-bit comparisons without jmp32 support are first widened via
// EmitSubregExt.
// NOTE(review): lines 824-825 (return type and function-name line), 827
// (TII declaration), 860 (the insertion iterator I) and 878
// (transferSuccessorsAndUpdatePHIs) are elided in this rendering — code
// kept verbatim.
826 MachineBasicBlock *BB) const {
828 DebugLoc DL = MI.getDebugLoc();
829 unsigned Opc = MI.getOpcode();
830 bool isSelectRROp = (Opc == BPF::Select ||
831 Opc == BPF::Select_64_32 ||
832 Opc == BPF::Select_32 ||
833 Opc == BPF::Select_32_64);
834
835 bool isMemcpyOp = Opc == BPF::MEMCPY;
836
837#ifndef NDEBUG
838 bool isSelectRIOp = (Opc == BPF::Select_Ri ||
839 Opc == BPF::Select_Ri_64_32 ||
840 Opc == BPF::Select_Ri_32 ||
841 Opc == BPF::Select_Ri_32_64);
842
843 if (!(isSelectRROp || isSelectRIOp || isMemcpyOp))
844 report_fatal_error("unhandled instruction type: " + Twine(Opc));
845#endif
846
847 if (isMemcpyOp)
848 return EmitInstrWithCustomInserterMemcpy(MI, BB);
849
850 bool is32BitCmp = (Opc == BPF::Select_32 ||
851 Opc == BPF::Select_32_64 ||
852 Opc == BPF::Select_Ri_32 ||
853 Opc == BPF::Select_Ri_32_64);
854
855 // To "insert" a SELECT instruction, we actually have to insert the diamond
856 // control-flow pattern. The incoming instruction knows the destination vreg
857 // to set, the condition code register to branch on, the true/false values to
858 // select between, and a branch opcode to use.
859 const BasicBlock *LLVM_BB = BB->getBasicBlock();
861
862 // ThisMBB:
863 // ...
864 // TrueVal = ...
865 // jmp_XX r1, r2 goto Copy1MBB
866 // fallthrough --> Copy0MBB
867 MachineBasicBlock *ThisMBB = BB;
868 MachineFunction *F = BB->getParent();
869 MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
870 MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
871
872 F->insert(I, Copy0MBB);
873 F->insert(I, Copy1MBB);
874 // Update machine-CFG edges by transferring all successors of the current
875 // block to the new block which will contain the Phi node for the select.
876 Copy1MBB->splice(Copy1MBB->begin(), BB,
877 std::next(MachineBasicBlock::iterator(MI)), BB->end());
879 // Next, add the true and fallthrough blocks as its successors.
880 BB->addSuccessor(Copy0MBB);
881 BB->addSuccessor(Copy1MBB);
882
883 // Insert Branch if Flag
884 int CC = MI.getOperand(3).getImm();
885 int NewCC;
886 switch (CC) {
887#define SET_NEWCC(X, Y) \
888 case ISD::X: \
889 if (is32BitCmp && HasJmp32) \
890 NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
891 else \
892 NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
893 break
894 SET_NEWCC(SETGT, JSGT);
895 SET_NEWCC(SETUGT, JUGT);
896 SET_NEWCC(SETGE, JSGE);
897 SET_NEWCC(SETUGE, JUGE);
898 SET_NEWCC(SETEQ, JEQ);
899 SET_NEWCC(SETNE, JNE);
900 SET_NEWCC(SETLT, JSLT);
901 SET_NEWCC(SETULT, JULT);
902 SET_NEWCC(SETLE, JSLE);
903 SET_NEWCC(SETULE, JULE);
904 default:
905 report_fatal_error("unimplemented select CondCode " + Twine(CC));
906 }
907
908 Register LHS = MI.getOperand(1).getReg();
909 bool isSignedCmp = (CC == ISD::SETGT ||
910 CC == ISD::SETGE ||
911 CC == ISD::SETLT ||
912 CC == ISD::SETLE);
913
914 // eBPF at the moment only has 64-bit comparison. Any 32-bit comparison need
915 // to be promoted, however if the 32-bit comparison operands are destination
916 // registers then they are implicitly zero-extended already, there is no
917 // need of explicit zero-extend sequence for them.
918 //
919 // We simply do extension for all situations in this method, but we will
920 // try to remove those unnecessary in BPFMIPeephole pass.
921 if (is32BitCmp && !HasJmp32)
922 LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);
923
924 if (isSelectRROp) {
925 Register RHS = MI.getOperand(2).getReg();
926
927 if (is32BitCmp && !HasJmp32)
928 RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);
929
930 BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
931 } else {
932 int64_t imm32 = MI.getOperand(2).getImm();
933 // Check before we build J*_ri instruction.
934 if (!isInt<32>(imm32))
935 report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
936 BuildMI(BB, DL, TII.get(NewCC))
937 .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
938 }
939
940 // Copy0MBB:
941 // %FalseValue = ...
942 // # fallthrough to Copy1MBB
943 BB = Copy0MBB;
944
945 // Update machine-CFG edges
946 BB->addSuccessor(Copy1MBB);
947
948 // Copy1MBB:
949 // %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
950 // ...
951 BB = Copy1MBB;
952 BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
953 .addReg(MI.getOperand(5).getReg())
954 .addMBB(Copy0MBB)
955 .addReg(MI.getOperand(4).getReg())
956 .addMBB(ThisMBB);
957
958 MI.eraseFromParent(); // The pseudo instruction is gone now.
959 return BB;
960}
961
// getSetCCResultType (signature line 962 elided in this rendering): SETCC
// results are i32 when ALU32 is available, i64 otherwise.
963 EVT VT) const {
964 return getHasAlu32() ? MVT::i32 : MVT::i64;
965}
966
// getScalarShiftAmountTy (signature line 967 elided in this rendering):
// shift amounts for i32 are i32 when ALU32 is available; i64 otherwise.
968 EVT VT) const {
969 return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
970}
971
972bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
973 const AddrMode &AM, Type *Ty,
974 unsigned AS,
975 Instruction *I) const {
976 // No global is ever allowed as a base.
977 if (AM.BaseGV)
978 return false;
979
980 switch (AM.Scale) {
981 case 0: // "r+i" or just "i", depending on HasBaseReg.
982 break;
983 case 1:
984 if (!AM.HasBaseReg) // allow "r+i".
985 break;
986 return false; // disallow "r+r" or "r+r+i".
987 default:
988 return false;
989 }
990
991 return true;
992}
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
static uint32_t * regMaskFromTemplate(const TargetRegisterInfo *TRI, MachineFunction &MF, const uint32_t *BaseRegMask)
static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
static cl::opt< bool > BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order", cl::Hidden, cl::init(false), cl::desc("Expand memcpy into load/store pairs in order"))
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask, MCRegister Reg)
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)
#define SET_NEWCC(X, Y)
static bool isSigned(unsigned int Opcode)
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
unsigned const TargetRegisterInfo * TRI
raw_pwrite_stream & OS
Value * RHS
Value * LHS
unsigned getCommonMaxStoresPerMemFunc() const
bool hasSdivSmod() const
Definition: BPFSubtarget.h:92
bool getHasJmpExt() const
Definition: BPFSubtarget.h:85
const BPFSelectionDAGInfo * getSelectionDAGInfo() const override
Definition: BPFSubtarget.h:105
bool hasLdsx() const
Definition: BPFSubtarget.h:89
bool hasMovsx() const
Definition: BPFSubtarget.h:90
bool getHasJmp32() const
Definition: BPFSubtarget.h:86
const BPFRegisterInfo * getRegisterInfo() const override
Definition: BPFSubtarget.h:108
bool getHasAlu32() const
Definition: BPFSubtarget.h:87
BPFTargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
BPFTargetLowering(const TargetMachine &TM, const BPFSubtarget &STI)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
Return the type to use for a scalar shift opcode, given the shifted amount type.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
CCState - This class holds information needed while lowering arguments and return values.
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool hasFnAttr(Attribute::AttrKind Kind) const
Determine whether this call has the given attribute.
Definition: InstrTypes.h:1459
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
A debug info location.
Definition: DebugLoc.h:33
Diagnostic information for unsupported feature in backend.
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:688
Type * getReturnType() const
Returns the type of the ret val.
Definition: Function.h:221
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Machine Value Type.
SimpleValueType SimpleTy
static auto integer_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor blocks which refer to FromMBB to refer to this.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
Definition: SelectionDAG.h:228
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:748
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
Definition: SelectionDAG.h:799
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getRegister(Register Reg, EVT VT)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
Definition: SelectionDAG.h:825
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:490
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
Definition: SelectionDAG.h:508
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:765
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:150
TargetInstrInfo - Interface to description of machine instruction set.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:303
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
self_iterator getIterator()
Definition: ilist_node.h:132
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
Definition: CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:780
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
Definition: ISDOpcodes.h:1197
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1193
@ CTLZ_ZERO_UNDEF
Definition: ISDOpcodes.h:753
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
Definition: ISDOpcodes.h:257
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:744
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1102
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:814
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
Definition: ISDOpcodes.h:1325
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:262
@ ATOMIC_LOAD_OR
Definition: ISDOpcodes.h:1338
@ ATOMIC_LOAD_XOR
Definition: ISDOpcodes.h:1339
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
Definition: ISDOpcodes.h:752
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1148
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1123
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1127
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:757
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:674
@ ATOMIC_LOAD_AND
Definition: ISDOpcodes.h:1336
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:772
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1112
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition: ISDOpcodes.h:849
@ ATOMIC_LOAD_ADD
Definition: ISDOpcodes.h:1334
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1333
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:817
@ BRCOND
BRCOND - Conditional branch.
Definition: ISDOpcodes.h:1141
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:794
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
Definition: ISDOpcodes.h:61
@ AssertZext
Definition: ISDOpcodes.h:62
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.
Definition: ISDOpcodes.h:1613
@ Dead
Unused definition.
@ Define
Register definition.
@ EarlyClobber
Register definition happens before uses.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
Definition: STLExtras.h:1192
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
void print(raw_ostream &OS) const
Implement operator<<.
Definition: ValueTypes.h:491
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:152
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals