//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that BPF uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "BPFISelLowering.h"
#include "BPF.h"
#include "BPFSubtarget.h"
#include "BPFTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "bpf-lower"

static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
  cl::Hidden, cl::init(false),
  cl::desc("Expand memcpy into load/store pairs in order"));
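// Illustrative use of the option above (the target triple and input file are
// just examples): llc -mtriple=bpfel -bpf-expand-memcpy-in-order input.ll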

static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
                 SDValue Val = {}) {
  std::string Str;
  if (Val) {
    raw_string_ostream OS(Str);
    Val->print(OS);
    OS << ' ';
  }
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
      MF.getFunction(), Twine(Str).concat(Msg), DL.getDebugLoc()));
}

BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
                                     const BPFSubtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(BPF::R11);

  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction({ISD::GlobalAddress, ISD::ConstantPool}, MVT::i64, Custom);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  // Set unsupported atomic operations as Custom so
  // we can emit better error messages than the fatal error
  // from SelectionDAG.
  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
    if (VT == MVT::i32) {
      if (STI.getHasAlu32())
        continue;
    } else {
      setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    }

    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_SWAP, VT, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
  }

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    if (!STI.hasSdivSmod()) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
    }
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::SHL_PARTS, VT, Expand);
    setOperationAction(ISD::SRL_PARTS, VT, Expand);
    setOperationAction(ISD::SRA_PARTS, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Custom);
  }

  if (STI.getHasAlu32()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Promote);
    setOperationAction(ISD::BR_CC, MVT::i32,
                       STI.getHasJmp32() ? Custom : Promote);
  }

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!STI.hasMovsx()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  }

  // Extended load operations for i1 types must be promoted.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    if (!STI.hasLdsx()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
    }
  }

  setBooleanContents(ZeroOrOneBooleanContent);
  setMaxAtomicSizeInBitsSupported(64);

  // Function alignments
  setMinFunctionAlignment(Align(8));
  setPrefFunctionAlignment(Align(8));

  if (BPFExpandMemcpyInOrder) {
    // LLVM generic code will try to expand memcpy into load/store pairs at
    // this stage, which is before quite a few IR optimization passes, so the
    // loads and stores could potentially be moved apart from each other, which
    // would cause trouble for the memcpy pattern matcher inside kernel eBPF
    // JIT compilers.
    //
    // When -bpf-expand-memcpy-in-order is specified, we want to defer the
    // expansion of memcpy to a later stage in the IR optimization pipeline so
    // those load/store pairs won't be touched and can be kept in order. Hence,
    // we set MaxStoresPerMem* to zero to disable the generic
    // getMemcpyLoadsAndStores code path, and ask LLVM to use the target
    // expander EmitTargetCodeForMemcpy.
    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 0;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 0;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 0;
    MaxLoadsPerMemcmp = MaxLoadsPerMemcmpOptSize = 0;
  } else {
    // Inline memcpy() so the kernel sees the explicit copy.
    unsigned CommonMaxStores =
        STI.getSelectionDAGInfo()->getCommonMaxStoresPerMemFunc();

    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores;
    MaxLoadsPerMemcmp = MaxLoadsPerMemcmpOptSize = CommonMaxStores;
  }

  // CPU/Feature control
  HasAlu32 = STI.getHasAlu32();
  HasJmp32 = STI.getHasJmp32();
  HasJmpExt = STI.getHasJmpExt();
  HasMovsx = STI.hasMovsx();
}

bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  return false;
}

bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (Val.getOpcode() == ISD::LOAD && VT1.isSimple() && VT2.isSimple()) {
    MVT MT1 = VT1.getSimpleVT().SimpleTy;
    MVT MT2 = VT2.getSimpleVT().SimpleTy;
    if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&
        (MT2 == MVT::i32 || MT2 == MVT::i64))
      return true;
  }
  return TargetLoweringBase::isZExtFree(Val, VT2);
}

BPFTargetLowering::ConstraintType
BPFTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'w':
      return C_RegisterClass;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'r': // GENERAL_REGS
      return std::make_pair(0U, &BPF::GPRRegClass);
    case 'w':
      if (HasAlu32)
        return std::make_pair(0U, &BPF::GPR32RegClass);
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

void BPFTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  const char *Msg;
  uint32_t Opcode = N->getOpcode();
  switch (Opcode) {
  default:
    report_fatal_error("unhandled custom legalization: " + Twine(Opcode));
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)
      Msg = "unsupported atomic operation, please use 32/64 bit version";
    else
      Msg = "unsupported atomic operation, please use 64 bit version";
    break;
  }

  SDLoc DL(N);
  // We'll still produce a fatal error downstream, but this diagnostic is more
  // user-friendly.
  fail(DL, DAG, Msg);
}

SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented opcode: " + Twine(Op.getOpcode()));
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SDIV:
  case ISD::SREM:
    return LowerSDIVSREM(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
}

// Calling Convention Implementation
#include "BPFGenCallingConv.inc"

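// Incoming formal arguments are passed in registers only; the checks at the
// end of LowerFormalArguments() below report stack arguments, variadic
// functions and aggregate returns as unsupported.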
SDValue BPFTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    report_fatal_error("unimplemented calling convention: " + Twine(CallConv));
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  bool HasMemArgs = false;
  for (size_t I = 0; I < ArgLocs.size(); ++I) {
    auto &VA = ArgLocs[I];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
      switch (SimpleTy) {
      default: {
        std::string Str;
        {
          raw_string_ostream OS(Str);
          RegVT.print(OS);
        }
        report_fatal_error("unhandled argument type: " + Twine(Str));
      }
      case MVT::i32:
      case MVT::i64:
        Register VReg = RegInfo.createVirtualRegister(
            SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);

        // If this is a value that has been promoted to a wider type, insert an
        // assert[sz]ext to capture this, then truncate to the right size.
        if (VA.getLocInfo() == CCValAssign::SExt)
          ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));

        if (VA.getLocInfo() != CCValAssign::Full)
          ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);

        InVals.push_back(ArgValue);

        break;
      }
    } else {
      if (VA.isMemLoc())
        HasMemArgs = true;
      else
        report_fatal_error("unhandled argument location");
      InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
    }
  }
  if (HasMemArgs)
    fail(DL, DAG, "stack arguments are not supported");
  if (IsVarArg)
    fail(DL, DAG, "variadic functions are not supported");
  if (MF.getFunction().hasStructRetAttr())
    fail(DL, DAG, "aggregate returns are not supported");

  return Chain;
}

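// BPF functions take at most five arguments, passed in registers R1-R5.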
const size_t BPFTargetLowering::MaxArgs = 5;

SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // The BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("unsupported calling convention: " + Twine(CallConv));
  case CallingConv::Fast:
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, MaxArgs> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getStackSize();

  if (Outs.size() > MaxArgs)
    fail(CLI.DL, DAG, "too many arguments", Callee);

  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;
    fail(CLI.DL, DAG, "pass by value not supported", Callee);
    break;
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;

  // Walk arg assignments
  for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue &Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      report_fatal_error("unhandled location info: " + Twine(VA.getLocInfo()));
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into RegsToPass vector
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    else
      report_fatal_error("stack arguments are not supported");
  }

  SDValue InGlue;

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InGlue
  // is necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InGlue);
    InGlue = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't hack
  // it. Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
    fail(CLI.DL, DAG,
         Twine("A call to built-in function '" + StringRef(E->getSymbol()) +
               "' is not supported."));
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InGlue = Chain.getValue(1);

  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);
  InGlue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}

SDValue
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_GLUE;

  // CCValAssign - represent the assignment of the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (MF.getFunction().getReturnType()->isAggregateType()) {
    fail(DL, DAG, "aggregate returns are not supported");
    return DAG.getNode(Opc, DL, MVT::Other, Chain);
  }

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (size_t i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      report_fatal_error("stack return values are not supported");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (Ins.size() > 1) {
    fail(DL, DAG, "only small returns supported");
    for (auto &In : Ins)
      InVals.push_back(DAG.getConstant(0, DL, In.VT));
    return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InGlue).getValue(1);
  }

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physreg.
  for (auto &Val : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InGlue).getValue(1);
    InGlue = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

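// Without the jmp-ext feature (getHasJmpExt()), only the '>'-style conditional
// jumps exist, so a '<'-style comparison is rewritten as the swapped '>' form.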
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETLT:
  case ISD::SETLE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

SDValue BPFTargetLowering::LowerSDIVSREM(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  fail(DL, DAG,
       "unsupported signed division, please convert to unsigned div/mod.");
  return DAG.getUNDEF(Op->getValueType(0));
}

SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  fail(DL, DAG, "unsupported dynamic stack allocation");
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
                     DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
}

SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};

  return DAG.getNode(BPFISD::SELECT_CC, DL, VTs, Ops);
}

const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((BPFISD::NodeType)Opcode) {
  case BPFISD::FIRST_NUMBER:
    break;
  case BPFISD::RET_GLUE:
    return "BPFISD::RET_GLUE";
  case BPFISD::CALL:
    return "BPFISD::CALL";
  case BPFISD::SELECT_CC:
    return "BPFISD::SELECT_CC";
  case BPFISD::BR_CC:
    return "BPFISD::BR_CC";
  case BPFISD::Wrapper:
    return "BPFISD::Wrapper";
  case BPFISD::MEMCPY:
    return "BPFISD::MEMCPY";
  }
  return nullptr;
}

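// Wrap global-address and constant-pool nodes in their target-specific
// counterparts so that later phases treat them as already-legalized addresses.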
static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

template <class NodeTy>
SDValue BPFTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                   unsigned Flags) const {
  SDLoc DL(N);

  SDValue GA = getTargetNode(N, DL, MVT::i64, DAG, Flags);

  return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
}

SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  if (N->getOffset() != 0)
    report_fatal_error("invalid offset for global address: " +
                       Twine(N->getOffset()));
  return getAddr(N, DAG);
}

SDValue BPFTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

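// Widen a 32-bit subregister value into a 64-bit register: zero-extension is a
// plain 32-to-64 move, while sign-extension uses MOVSX when available and a
// shift-left/arithmetic-shift-right pair otherwise.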
unsigned
BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Reg, bool isSigned) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
  MachineFunction *F = BB->getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineRegisterInfo &RegInfo = F->getRegInfo();

  if (!isSigned) {
    Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    return PromotedReg0;
  }
  Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
  if (HasMovsx) {
    BuildMI(BB, DL, TII.get(BPF::MOVSX_rr_32), PromotedReg0).addReg(Reg);
  } else {
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
        .addReg(PromotedReg0).addImm(32);
    BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
        .addReg(PromotedReg1).addImm(32);
  }

  return PromotedReg2;
}

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
                                                     MachineBasicBlock *BB)
                                                     const {
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);
  unsigned ScratchReg;

  // This function does custom insertion while lowering BPFISD::MEMCPY, which
  // only has two register operands from the memcpy semantics: the copy source
  // address and the copy destination address.
  //
  // Because we will expand BPFISD::MEMCPY into load/store pairs, we need a
  // third scratch register to serve as the destination register of the loads
  // and the source register of the stores.
  //
  // The scratch register here carries the Define | Dead | EarlyClobber flags.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction. The Define flag is
  // needed to convince the machine verifier that an undef value isn't a
  // problem, as we are loading memory into it anyway. The Dead flag is needed
  // because the value in the scratch register isn't supposed to be used by any
  // other instruction.
  ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
  MIB.addReg(ScratchReg,
             RegState::Define | RegState::Dead | RegState::EarlyClobber);

  return BB;
}

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  bool isSelectRROp = (Opc == BPF::Select ||
                       Opc == BPF::Select_64_32 ||
                       Opc == BPF::Select_32 ||
                       Opc == BPF::Select_32_64);

  bool isMemcpyOp = Opc == BPF::MEMCPY;

#ifndef NDEBUG
  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
                       Opc == BPF::Select_Ri_64_32 ||
                       Opc == BPF::Select_Ri_32 ||
                       Opc == BPF::Select_Ri_32_64);

  if (!(isSelectRROp || isSelectRIOp || isMemcpyOp))
    report_fatal_error("unhandled instruction type: " + Twine(Opc));
#endif

  if (isMemcpyOp)
    return EmitInstrWithCustomInserterMemcpy(MI, BB);

  bool is32BitCmp = (Opc == BPF::Select_32 ||
                     Opc == BPF::Select_32_64 ||
                     Opc == BPF::Select_Ri_32 ||
                     Opc == BPF::Select_Ri_32_64);

  // To "insert" a SELECT instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  // ThisMBB:
  // ...
  //  TrueVal = ...
  //  jmp_XX r1, r2 goto Copy1MBB
  //  fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, Copy0MBB);
  F->insert(I, Copy1MBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  Copy1MBB->splice(Copy1MBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(Copy0MBB);
  BB->addSuccessor(Copy1MBB);

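  // Map the ISD condition code carried by the Select pseudo to a concrete BPF
  // jump opcode, preferring the 32-bit jump forms when the comparison is
  // 32-bit and the subtarget has jmp32.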
  // Insert Branch if Flag
  int CC = MI.getOperand(3).getImm();
  int NewCC;
  switch (CC) {
#define SET_NEWCC(X, Y) \
  case ISD::X: \
    if (is32BitCmp && HasJmp32) \
      NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
    else \
      NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
    break
  SET_NEWCC(SETGT, JSGT);
  SET_NEWCC(SETUGT, JUGT);
  SET_NEWCC(SETGE, JSGE);
  SET_NEWCC(SETUGE, JUGE);
  SET_NEWCC(SETEQ, JEQ);
  SET_NEWCC(SETNE, JNE);
  SET_NEWCC(SETLT, JSLT);
  SET_NEWCC(SETULT, JULT);
  SET_NEWCC(SETLE, JSLE);
  SET_NEWCC(SETULE, JULE);
  default:
    report_fatal_error("unimplemented select CondCode " + Twine(CC));
  }

  Register LHS = MI.getOperand(1).getReg();
  bool isSignedCmp = (CC == ISD::SETGT ||
                      CC == ISD::SETGE ||
                      CC == ISD::SETLT ||
                      CC == ISD::SETLE);

  // eBPF at the moment only has 64-bit comparisons. Any 32-bit comparison
  // needs to be promoted; however, if the 32-bit comparison operands are
  // destination registers then they are already implicitly zero-extended, so
  // there is no need for an explicit zero-extension sequence for them.
  //
  // We simply do the extension for all situations in this method, but we will
  // try to remove the unnecessary ones in the BPFMIPeephole pass.
  if (is32BitCmp && !HasJmp32)
    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

  if (isSelectRROp) {
    Register RHS = MI.getOperand(2).getReg();

    if (is32BitCmp && !HasJmp32)
      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
  } else {
    int64_t imm32 = MI.getOperand(2).getImm();
    // Check before we build the J*_ri instruction.
    if (!isInt<32>(imm32))
      report_fatal_error("immediate overflows 32 bits: " + Twine(imm32));
    BuildMI(BB, DL, TII.get(NewCC))
        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
  }

  // Copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to Copy1MBB
  BB = Copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(Copy1MBB);

  // Copy1MBB:
  //  %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  //  ...
  BB = Copy1MBB;
  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

MVT BPFTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                              EVT VT) const {
  return getHasAlu32() ? MVT::i32 : MVT::i64;
}

EVT BPFTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
}

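// BPF loads and stores only support register + immediate offset addressing,
// so reject any mode that would require a reg+reg form.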
bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}