//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control jump table emission on Hexagon target"));
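// These cl::opt flags are consumed by llc; for instance, jump-table emission
// can be disabled with:
//   llc -mtriple=hexagon -hexagon-emit-jump-tables=false foo.ll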

static cl::opt<bool>
    EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden,
                         cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<bool> EnableFastMath("ffast-math", cl::Hidden,
                                    cl::desc("Enable Fast Math processing"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
                                      cl::init(5),
                                      cl::desc("Set minimum jump tables"));

static cl::opt<int>
    MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6),
                         cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6),
                          cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden,
                                 cl::init(4),
                                 cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8),
                         cl::desc("Max #stores to inline memset"));

static cl::opt<int>
    MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memset"));

static cl::opt<bool> AlignLoads("hexagon-align-loads",
  cl::Hidden, cl::init(false),
  cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool>
    DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
                            cl::init(false),
                            cl::desc("Disable minimum alignment of 1 for "
                                     "arguments passed by value on stack"));

namespace {

  class HexagonCCState : public CCState {
    unsigned NumNamedVarArgParams = 0;

  public:
    HexagonCCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
                   SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
                   unsigned NumNamedArgs)
        : CCState(CC, IsVarArg, MF, locs, C),
          NumNamedVarArgParams(NumNamedArgs) {}
    unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
  };

} // end anonymous namespace


// Implement calling convention for Hexagon.

static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  static const MCPhysReg ArgRegs[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2,
    Hexagon::R3, Hexagon::R4, Hexagon::R5
  };
  const unsigned NumArgRegs = std::size(ArgRegs);
  unsigned RegNum = State.getFirstUnallocated(ArgRegs);

  // RegNum is an index into ArgRegs: skip a register if RegNum is odd.
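  // For example, in a call f(i32, i64) the i32 is assigned R0, leaving R1 as
  // the first unallocated register; R1 is then skipped here so that the i64
  // lands in the even-aligned register pair R3:2.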
  if (RegNum != NumArgRegs && RegNum % 2 == 1)
    State.AllocateReg(ArgRegs[RegNum]);

  // Always return false here, as this function only makes sure that the first
  // unallocated register has an even register number and does not actually
  // allocate a register for the current argument.
  return false;
}

#include "HexagonGenCallingConv.inc"


SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
      const {
  return SDValue();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
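/// For example, a 12-byte struct passed by value is lowered to a 12-byte
/// memcpy from the caller's copy into the outgoing argument slot on the
/// caller's stack.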
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile=*/false, /*AlwaysInline=*/false,
      /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
}

bool
HexagonTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

  if (MF.getSubtarget<HexagonSubtarget>().useHVXOps())
    return CCInfo.CheckReturn(Outs, RetCC_Hexagon_HVX);
  return CCInfo.CheckReturn(Outs, RetCC_Hexagon);
}

// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values of ISD::RET
  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    SDValue Val = OutVals[i];

    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        Val = DAG.getBitcast(VA.getLocVT(), Val);
        break;
      case CCValAssign::SExt:
        Val = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Val);
        break;
      case CCValAssign::ZExt:
        Val = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Val);
        break;
      case CCValAssign::AExt:
        Val = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Val);
        break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Val, Glue);

    // Guarantee that all emitted copies are stuck together with flags.
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the glue if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(HexagonISD::RET_GLUE, dl, MVT::Other, RetOps);
}

bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // If either no tail call or told not to tail call at all, don't.
  return CI->isTailCall();
}

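// Resolve a register name used with the llvm.read_register and
// llvm.write_register intrinsics, e.g.:
//   %v = call i32 @llvm.read_register.i32(metadata !0)
//   !0 = !{!"r19"}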
Register HexagonTargetLowering::getRegisterByName(
      const char* RegName, LLT VT, const MachineFunction &) const {
  // Just support r19, the linux kernel uses it.
  Register Reg = StringSwitch<Register>(RegName)
                     .Case("r0", Hexagon::R0)
                     .Case("r1", Hexagon::R1)
                     .Case("r2", Hexagon::R2)
                     .Case("r3", Hexagon::R3)
                     .Case("r4", Hexagon::R4)
                     .Case("r5", Hexagon::R5)
                     .Case("r6", Hexagon::R6)
                     .Case("r7", Hexagon::R7)
                     .Case("r8", Hexagon::R8)
                     .Case("r9", Hexagon::R9)
                     .Case("r10", Hexagon::R10)
                     .Case("r11", Hexagon::R11)
                     .Case("r12", Hexagon::R12)
                     .Case("r13", Hexagon::R13)
                     .Case("r14", Hexagon::R14)
                     .Case("r15", Hexagon::R15)
                     .Case("r16", Hexagon::R16)
                     .Case("r17", Hexagon::R17)
                     .Case("r18", Hexagon::R18)
                     .Case("r19", Hexagon::R19)
                     .Case("r20", Hexagon::R20)
                     .Case("r21", Hexagon::R21)
                     .Case("r22", Hexagon::R22)
                     .Case("r23", Hexagon::R23)
                     .Case("r24", Hexagon::R24)
                     .Case("r25", Hexagon::R25)
                     .Case("r26", Hexagon::R26)
                     .Case("r27", Hexagon::R27)
                     .Case("r28", Hexagon::R28)
                     .Case("r29", Hexagon::R29)
                     .Case("r30", Hexagon::R30)
                     .Case("r31", Hexagon::R31)
                     .Case("r1:0", Hexagon::D0)
                     .Case("r3:2", Hexagon::D1)
                     .Case("r5:4", Hexagon::D2)
                     .Case("r7:6", Hexagon::D3)
                     .Case("r9:8", Hexagon::D4)
                     .Case("r11:10", Hexagon::D5)
                     .Case("r13:12", Hexagon::D6)
                     .Case("r15:14", Hexagon::D7)
                     .Case("r17:16", Hexagon::D8)
                     .Case("r19:18", Hexagon::D9)
                     .Case("r21:20", Hexagon::D10)
                     .Case("r23:22", Hexagon::D11)
                     .Case("r25:24", Hexagon::D12)
                     .Case("r27:26", Hexagon::D13)
                     .Case("r29:28", Hexagon::D14)
                     .Case("r31:30", Hexagon::D15)
                     .Case("sp", Hexagon::R29)
                     .Case("fp", Hexagon::R30)
                     .Case("lr", Hexagon::R31)
                     .Case("p0", Hexagon::P0)
                     .Case("p1", Hexagon::P1)
                     .Case("p2", Hexagon::P2)
                     .Case("p3", Hexagon::P3)
                     .Case("sa0", Hexagon::SA0)
                     .Case("lc0", Hexagon::LC0)
                     .Case("sa1", Hexagon::SA1)
                     .Case("lc1", Hexagon::LC1)
                     .Case("m0", Hexagon::M0)
                     .Case("m1", Hexagon::M1)
                     .Case("usr", Hexagon::USR)
                     .Case("ugp", Hexagon::UGP)
                     .Case("cs0", Hexagon::CS0)
                     .Case("cs1", Hexagon::CS1)
                     .Default(Register());
  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
/// being lowered. Returns a SDNode with the same number of values as the
/// ISD::CALL.
SDValue HexagonTargetLowering::LowerCallResult(
    SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon_HVX);
  else
    CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physregs.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue RetVal;
    if (RVLocs[i].getValVT() == MVT::i1) {
      // Return values of type MVT::i1 require special handling. The reason
      // is that MVT::i1 is associated with the PredRegs register class, but
      // values of that type are still returned in R0. Generate an explicit
      // copy into a predicate register from R0, and treat the value of the
      // predicate register as the call result.
      auto &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue FR0 = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                       MVT::i32, Glue);
      // FR0 = (Value, Chain, Glue)
      Register PredR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      SDValue TPR = DAG.getCopyToReg(FR0.getValue(1), dl, PredR,
                                     FR0.getValue(0), FR0.getValue(2));
      // TPR = (Chain, Glue)
      // Don't glue this CopyFromReg, because it copies from a virtual
      // register. If it is glued to the call, InstrEmitter will add it
      // as an implicit def to the call (EmitMachineNode).
      RetVal = DAG.getCopyFromReg(TPR.getValue(0), dl, PredR, MVT::i1);
      Glue = TPR.getValue(1);
      Chain = TPR.getValue(0);
    } else {
      RetVal = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                                  RVLocs[i].getValVT(), Glue);
      Glue = RetVal.getValue(2);
      Chain = RetVal.getValue(1);
    }
    InVals.push_back(RetVal.getValue(0));
  }

  return Chain;
}

/// LowerCall - Functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  bool DoesNotReturn = CLI.DoesNotReturn;

  bool IsStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto PtrVT = getPointerTy(MF.getDataLayout());

  unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0;
  if (GlobalAddressSDNode *GAN = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32);

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs, *DAG.getContext(),
                        NumParams);

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  if (CLI.IsTailCall) {
    bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
    CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                        IsVarArg, IsStructRet, StructAttrFlag, Outs,
                        OutVals, Ins, DAG);
    for (const CCValAssign &VA : ArgLocs) {
      if (VA.isMemLoc()) {
        CLI.IsTailCall = false;
        break;
      }
    }
    LLVM_DEBUG(dbgs() << (CLI.IsTailCall ? "Eligible for Tail Call\n"
                                         : "Argument must be passed on stack. "
                                           "Not eligible for Tail Call\n"));
  }
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);

  bool NeedsArgAlign = false;
  Align LargestAlignSeen;
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    // Record if we need > 8 byte alignment on an argument.
    bool ArgAlign = Subtarget.isHVXVectorType(VA.getValVT());
    NeedsArgAlign |= ArgAlign;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, BCvt, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        Arg = DAG.getBitcast(VA.getLocVT(), Arg);
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
                                        StackPtr.getValueType());
      MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
      if (ArgAlign)
        LargestAlignSeen = std::max(
            LargestAlignSeen, Align(VA.getLocVT().getStoreSizeInBits() / 8));
      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
                                                        Flags, DAG, dl));
      } else {
        MachinePointerInfo LocPI = MachinePointerInfo::getStack(
            DAG.getMachineFunction(), LocMemOffset);
        SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI);
        MemOpChains.push_back(S);
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  if (NeedsArgAlign && Subtarget.hasV60Ops()) {
    LLVM_DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
    Align VecAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
    LargestAlignSeen = std::max(LargestAlignSeen, VecAlign);
    MFI.ensureMaxAlignment(LargestAlignSeen);
  }
  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  SDValue Glue;
  if (!CLI.IsTailCall) {
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
    Glue = Chain.getValue(1);
  }

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The Glue is necessary since all emitted instructions must be
  // stuck together.
  if (!CLI.IsTailCall) {
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
  } else {
    // For tail calls lower the arguments to the 'real' stack slot.
    //
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not flag preceding copytoreg stuff together with the following stuff.
    Glue = SDValue();
    for (const auto &R : RegsToPass) {
      Chain = DAG.getCopyToReg(Chain, dl, R.first, R.second, Glue);
      Glue = Chain.getValue(1);
    }
    Glue = SDValue();
  }

  bool LongCalls = MF.getSubtarget<HexagonSubtarget>().useLongCalls();
  unsigned Flags = LongCalls ? HexagonII::HMOTF_ConstExtended : 0;

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags);
  } else if (ExternalSymbolSDNode *S =
             dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (const auto &R : RegsToPass)
    Ops.push_back(DAG.getRegister(R.first, R.second.getValueType()));

  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (Glue.getNode())
    Ops.push_back(Glue);

  if (CLI.IsTailCall) {
    MFI.setHasTailCall();
    return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
  }

  // Set this here because we need to know this for "hasFP" in frame lowering.
  // The target-independent code calls getFrameRegister before setting it, and
  // getFrameRegister uses hasFP to determine whether the function has FP.
  MFI.setHasCalls(true);

  unsigned OpCode = DoesNotReturn ? HexagonISD::CALLnr : HexagonISD::CALL;
  Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, Glue, dl);
  Glue = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, Glue, CallConv, IsVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}
/// Returns true by value, base pointer and offset pointer and addressing
/// mode by reference if this node can be combined with a load / store to
/// form a post-indexed load / store.
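/// For example, a load of *p followed by p += 4 can be selected as a single
/// post-incremented load, e.g. r1 = memw(r0++#4), provided the increment is
/// a valid auto-increment immediate for the access size.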
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
      SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
      SelectionDAG &DAG) const {
  LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N);
  if (!LSN)
    return false;
  EVT VT = LSN->getMemoryVT();
  if (!VT.isSimple())
    return false;
  bool IsLegalType = VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
                     VT == MVT::i64 || VT == MVT::f32 || VT == MVT::f64 ||
                     VT == MVT::v2i16 || VT == MVT::v2i32 || VT == MVT::v4i8 ||
                     VT == MVT::v4i16 || VT == MVT::v8i8 ||
                     Subtarget.isHVXVectorType(VT.getSimpleVT());
  if (!IsLegalType)
    return false;

  if (Op->getOpcode() != ISD::ADD)
    return false;
  Base = Op->getOperand(0);
  Offset = Op->getOperand(1);
  if (!isa<ConstantSDNode>(Offset.getNode()))
    return false;
  AM = ISD::POST_INC;

  int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
  return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V);
}

SDValue
HexagonTargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getMachineFunction().getFunction().hasOptSize())
    return SDValue();
  else
    return Op;
}

SDValue
HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  unsigned LR = HRI.getRARegister();

  if ((Op.getOpcode() != ISD::INLINEASM &&
       Op.getOpcode() != ISD::INLINEASM_BR) || HMFI.hasClobberLR())
    return Op;

  unsigned NumOps = Op.getNumOperands();
  if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue)
    --NumOps;  // Ignore the flag operand.

  for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
    const InlineAsm::Flag Flags(Op.getConstantOperandVal(i));
    unsigned NumVals = Flags.getNumOperandRegisters();
    ++i;  // Skip the ID value.

    switch (Flags.getKind()) {
      default:
        llvm_unreachable("Bad flags!");
      case InlineAsm::Kind::RegUse:
      case InlineAsm::Kind::Imm:
      case InlineAsm::Kind::Mem:
        i += NumVals;
        break;
      case InlineAsm::Kind::Clobber:
      case InlineAsm::Kind::RegDef:
      case InlineAsm::Kind::RegDefEarlyClobber: {
        for (; NumVals; --NumVals, ++i) {
          Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
          if (Reg != LR)
            continue;
          HMFI.setHasClobberLR(true);
          return Op;
        }
        break;
      }
    }
  }

  return Op;
}

// Need to transform ISD::PREFETCH into something that doesn't inherit
// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
// SDNPMayStore.
SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  // Lower it to DCFETCH($reg, #0). A "pat" will try to merge the offset in,
  // if the "reg" is fed by an "add".
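  // For example, if Addr is (add %base, 8), instruction selection can fold
  // the immediate and emit roughly "dcfetch(%base + #8)" rather than a
  // separate add followed by the fetch.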
  SDLoc DL(Op);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
}

// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDLoc dl(Op);
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
  return DAG.getNode(HexagonISD::READCYCLE, dl, VTs, Chain);
}

SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  unsigned IntNo = Op.getConstantOperandVal(1);
  // Lower the hexagon_prefetch builtin to DCFETCH, as above.
  if (IntNo == Intrinsic::hexagon_prefetch) {
    SDValue Addr = Op.getOperand(2);
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
    return DAG.getNode(HexagonISD::DCFETCH, DL, MVT::Other, Chain, Addr, Zero);
  }
  return SDValue();
}

SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDValue Align = Op.getOperand(2);
  SDLoc dl(Op);

  ConstantSDNode *AlignConst = dyn_cast<ConstantSDNode>(Align);
  assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC");

  unsigned A = AlignConst->getSExtValue();
  auto &HFI = *Subtarget.getFrameLowering();
  // "Zero" means natural stack alignment.
  if (A == 0)
    A = HFI.getStackAlign().value();

  LLVM_DEBUG({
    dbgs() << __func__ << " Align: " << A << " Size: ";
    Size.getNode()->dump(&DAG);
    dbgs() << "\n";
  });

  SDValue AC = DAG.getConstant(A, dl, MVT::i32);
  SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), AA.getValue(1));
  return AA;
}

SDValue HexagonTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Linux ABI treats var-arg calls the same way as regular ones.
  bool TreatAsVarArg = !Subtarget.isEnvironmentMusl() && IsVarArg;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, TreatAsVarArg, MF, ArgLocs,
                        *DAG.getContext(),
                        MF.getFunction().getFunctionType()->getNumParams());

  if (Subtarget.useHVXOps())
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_HVX);
  else if (DisableArgsMinAlignment)
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon_Legacy);
  else
    CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // For LLVM, in the case of returning a struct by value (>8 bytes), the
  // first argument is a pointer that points to the location on the caller's
  // stack where the return value will be stored. For Hexagon, the location
  // on the caller's stack is passed only when the struct size is smaller
  // than (or equal to) 8 bytes. Otherwise no address is passed into the
  // callee and the callee returns the result directly through R0/R1.
  auto NextSingleReg = [] (const TargetRegisterClass &RC, unsigned Reg) {
    switch (RC.getID()) {
      case Hexagon::IntRegsRegClassID:
        return Reg - Hexagon::R0 + 1;
      case Hexagon::DoubleRegsRegClassID:
        return (Reg - Hexagon::D0 + 1) * 2;
      case Hexagon::HvxVRRegClassID:
        return Reg - Hexagon::V0 + 1;
      case Hexagon::HvxWRRegClassID:
        return (Reg - Hexagon::W0 + 1) * 2;
    }
    llvm_unreachable("Unexpected register class");
  };
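  // For example, a 64-bit argument in D1 (the r3:2 pair) maps to 4, i.e. the
  // next free 32-bit argument register after it is R4.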

  auto &HFL = const_cast<HexagonFrameLowering&>(*Subtarget.getFrameLowering());
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  HFL.FirstVarArgSavedReg = 0;
  HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    bool ByVal = Flags.isByVal();

    // Arguments passed in registers:
    //   1. 32- and 64-bit values and HVX vectors are passed directly,
    //   2. Large structs are passed via an address, and the address is
    //      passed in a register.
    if (VA.isRegLoc() && ByVal && Flags.getByValSize() <= 8)
      llvm_unreachable("ByValSize must be bigger than 8 bytes");

    bool InReg = VA.isRegLoc() &&
                 (!ByVal || (ByVal && Flags.getByValSize() > 8));

    if (InReg) {
      MVT RegVT = VA.getLocVT();
      if (VA.getLocInfo() == CCValAssign::BCvt)
        RegVT = VA.getValVT();

      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      Register VReg = MRI.createVirtualRegister(RC);
      SDValue Copy = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);

      // Treat values of type MVT::i1 specially: they are passed in
      // registers of type i32, but they need to remain as values of
      // type i1 for consistency of the argument lowering.
      if (VA.getValVT() == MVT::i1) {
        assert(RegVT.getSizeInBits() <= 32);
        SDValue T = DAG.getNode(ISD::AND, dl, RegVT,
                                Copy, DAG.getConstant(1, dl, RegVT));
        Copy = DAG.getSetCC(dl, MVT::i1, T, DAG.getConstant(0, dl, RegVT),
                            ISD::SETNE);
      } else {
#ifndef NDEBUG
        unsigned RegSize = RegVT.getSizeInBits();
        assert(RegSize == 32 || RegSize == 64 ||
               Subtarget.isHVXVectorType(RegVT));
#endif
      }
      InVals.push_back(Copy);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      HFL.FirstVarArgSavedReg = NextSingleReg(*RC, VA.getLocReg());
    } else {
      assert(VA.isMemLoc() && "Argument should be passed in memory");

      // If it's a byval parameter, then we need to compute the
      // "real" size, not the size of the pointer.
      unsigned ObjSize = Flags.isByVal()
                            ? Flags.getByValSize()
                            : VA.getLocVT().getStoreSizeInBits() / 8;

      // Create the frame index object for this incoming parameter.
      int Offset = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      int FI = MFI.CreateFixedObject(ObjSize, Offset, true);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        SDValue L = DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                MachinePointerInfo::getFixedStack(MF, FI, 0));
        InVals.push_back(L);
      }
    }
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    for (int i = HFL.FirstVarArgSavedReg; i < 6; i++)
      MRI.addLiveIn(Hexagon::R0+i);
  }

  if (IsVarArg && Subtarget.isEnvironmentMusl()) {
    HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1);
    HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects()));

    // Create the frame index for the start of the register saved area.
    int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg;
    bool RequiresPadding = (NumVarArgRegs & 1);
    int RegSaveAreaSizePlusPadding = RequiresPadding
                                        ? (NumVarArgRegs + 1) * 4
                                        : NumVarArgRegs * 4;
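    // For example, if FirstVarArgSavedReg is 3, then R3-R5 still need to be
    // saved: NumVarArgRegs is 3 (odd), so 4 bytes of padding keep the save
    // area 8-byte aligned and RegSaveAreaSizePlusPadding becomes 16.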

    if (RegSaveAreaSizePlusPadding > 0) {
      // The offset to the saved register area should be 8-byte aligned.
      int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      if (RegAreaStart % 8)
        RegAreaStart = (RegAreaStart + 7) & -8;

      int RegSaveAreaFrameIndex =
          MFI.CreateFixedObject(RegSaveAreaSizePlusPadding, RegAreaStart, true);
      HMFI.setRegSavedAreaStartFrameIndex(RegSaveAreaFrameIndex);

      // This will point to the next argument passed via stack.
      int Offset = RegAreaStart + RegSaveAreaSizePlusPadding;
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setVarArgsFrameIndex(FI);
    } else {
      // This will point to the next argument passed via stack, when
      // there is no saved register area.
      int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
      int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
      HMFI.setRegSavedAreaStartFrameIndex(FI);
      HMFI.setVarArgsFrameIndex(FI);
    }
  }


  if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
    // This will point to the next argument passed via stack.
    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
    int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
    HMFI.setVarArgsFrameIndex(FI);
  }

  return Chain;
}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  if (!Subtarget.isEnvironmentMusl()) {
    return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr, Op.getOperand(1),
                        MachinePointerInfo(SV));
  }
  auto &FuncInfo = *MF.getInfo<HexagonMachineFunctionInfo>();
  auto &HFL = *Subtarget.getFrameLowering();
  SDLoc DL(Op);
  SmallVector<SDValue, 8> MemOps;

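  // Under musl, the Hexagon va_list is in effect a three-pointer structure
  // (field names below are descriptive, mirroring the front end's builtin
  // va_list layout):
  //   struct va_list {
  //     void *CurrentSavedRegArea;  // next unread register-save slot
  //     void *SavedRegAreaEnd;      // end of the register-save area
  //     void *OverflowArea;         // next argument passed on the stack
  //   };
  // The three stores emitted below initialize these fields in order.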
  // Get frame index of va_list.
  SDValue FIN = Op.getOperand(1);

  // If the first vararg register is odd, add 4 bytes to the start of the
  // saved register area to point to the first register location. This is
  // because the saved register area has to be 8-byte aligned. In case of an
  // odd start register, there will be 4 bytes of padding at the beginning of
  // the saved register area. If all registers are used up, the following
  // condition will handle it correctly.
  SDValue SavedRegAreaStartFrameIndex =
      DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(), MVT::i32);

  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (HFL.FirstVarArgSavedReg & 1)
    SavedRegAreaStartFrameIndex =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getFrameIndex(FuncInfo.getRegSavedAreaStartFrameIndex(),
                                      MVT::i32),
                    DAG.getIntPtrConstant(4, DL));

  // Store the saved register area start pointer.
  SDValue Store =
      DAG.getStore(Op.getOperand(0), DL,
                   SavedRegAreaStartFrameIndex,
                   FIN, MachinePointerInfo(SV));
  MemOps.push_back(Store);

  // Store the saved register area end pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 4));
  MemOps.push_back(Store);

  // Store the overflow area pointer.
  FIN = DAG.getNode(ISD::ADD, DL, PtrVT,
                    FIN, DAG.getIntPtrConstant(4, DL));
  Store = DAG.getStore(Op.getOperand(0), DL,
                       DAG.getFrameIndex(FuncInfo.getVarArgsFrameIndex(),
                                         PtrVT),
                       FIN, MachinePointerInfo(SV, 8));
  MemOps.push_back(Store);

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue
HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  // Assert that the linux ABI is enabled for the current compilation.
  assert(Subtarget.isEnvironmentMusl() && "Linux ABI should be enabled");
  SDValue Chain = Op.getOperand(0);
  SDValue DestPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);
  // The size of the va_list is 12 bytes as it has 3 pointers. Therefore,
  // we need to memcpy 12 bytes from one va_list to the other.
  return DAG.getMemcpy(Chain, DL, DestPtr, SrcPtr,
                       DAG.getIntPtrConstant(12, DL), Align(4),
                       /*isVolatile*/ false, false, false,
                       MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}

SDValue
HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  const SDLoc &dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  MVT ResTy = ty(Op);
  MVT OpTy = ty(LHS);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    return DAG.getSetCC(dl, ResTy,
                        DAG.getSExtOrTrunc(LHS, SDLoc(LHS), WideTy),
                        DAG.getSExtOrTrunc(RHS, SDLoc(RHS), WideTy), CC);
  }

  // Treat all other vector types as legal.
  if (ResTy.isVector())
    return Op;

  // Comparisons of short integers should use sign-extend, not zero-extend,
  // since we can represent small negative values in the compare instructions.
  // The LLVM default is to use zero-extend arbitrarily in these cases.
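  // For example, in (setcc i16 %a, -1, setlt), sign-extending %a to i32
  // preserves its value, while zero-extension would map a negative %a to a
  // large positive number and change the result of the comparison.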
  auto isSExtFree = [this](SDValue N) {
    switch (N.getOpcode()) {
      case ISD::TRUNCATE: {
        // A sign-extend of a truncate of a sign-extend is free.
        SDValue Op = N.getOperand(0);
        if (Op.getOpcode() != ISD::AssertSext)
          return false;
        EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT();
        unsigned ThisBW = ty(N).getSizeInBits();
        unsigned OrigBW = OrigTy.getSizeInBits();
        // The type that was sign-extended to get the AssertSext must be
        // narrower than the type of N (so that N has still the same value
        // as the original).
        return ThisBW >= OrigBW;
      }
      case ISD::LOAD:
        // We have sign-extended loads.
        return true;
    }
    return false;
  };

  if (OpTy == MVT::i8 || OpTy == MVT::i16) {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS);
    bool IsNegative = C && C->getAPIntValue().isNegative();
    if (IsNegative || isSExtFree(LHS) || isSExtFree(RHS))
      return DAG.getSetCC(dl, ResTy,
                          DAG.getSExtOrTrunc(LHS, SDLoc(LHS), MVT::i32),
                          DAG.getSExtOrTrunc(RHS, SDLoc(RHS), MVT::i32), CC);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue PredOp = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1), Op2 = Op.getOperand(2);
  MVT OpTy = ty(Op1);
  const SDLoc &dl(Op);

  if (OpTy == MVT::v2i16 || OpTy == MVT::v4i8) {
    MVT ElemTy = OpTy.getVectorElementType();
    assert(ElemTy.isScalarInteger());
    MVT WideTy = MVT::getVectorVT(MVT::getIntegerVT(2*ElemTy.getSizeInBits()),
                                  OpTy.getVectorNumElements());
    // Generate (trunc (select (_, sext, sext))).
    return DAG.getSExtOrTrunc(
              DAG.getSelect(dl, WideTy, PredOp,
                            DAG.getSExtOrTrunc(Op1, dl, WideTy),
                            DAG.getSExtOrTrunc(Op2, dl, WideTy)),
              dl, OpTy);
  }

  return SDValue();
}

SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  EVT ValTy = Op.getValueType();
  ConstantPoolSDNode *CPN = cast<ConstantPoolSDNode>(Op);
  Constant *CVal = nullptr;
  bool isVTi1Type = false;
  if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) {
    if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) {
      IRBuilder<> IRB(CV->getContext());
      SmallVector<Constant*, 128> NewConst;
      unsigned VecLen = CV->getNumOperands();
      assert(isPowerOf2_32(VecLen) &&
             "conversion only supported for pow2 VectorSize");
      for (unsigned i = 0; i < VecLen; ++i)
        NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue()));

      CVal = ConstantVector::get(NewConst);
      isVTi1Type = true;
    }
  }
  Align Alignment = CPN->getAlign();
  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF = IsPositionIndependent ? HexagonII::MO_PCREL : 0;

  unsigned Offset = 0;
  SDValue T;
  if (CPN->isMachineConstantPoolEntry())
    T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Alignment,
                                  Offset, TF);
  else if (isVTi1Type)
    T = DAG.getTargetConstantPool(CVal, ValTy, Alignment, Offset, TF);
  else
    T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Alignment, Offset,
                                  TF);

  assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF &&
         "Inconsistent target flag encountered");

  if (IsPositionIndependent)
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), ValTy, T);
  return DAG.getNode(HexagonISD::CP, SDLoc(Op), ValTy, T);
}

SDValue
HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  int Idx = cast<JumpTableSDNode>(Op)->getIndex();
  if (isPositionIndependent()) {
    SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), VT, T);
  }

  SDValue T = DAG.getTargetJumpTable(Idx, VT);
  return DAG.getNode(HexagonISD::JT, SDLoc(Op), VT, T);
}

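// Lower llvm.returnaddress. Depth 0 returns the current LR (R31); for deeper
// frames, e.g. __builtin_return_address(1), the saved LR is loaded from
// offset 4 past the corresponding frame pointer.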
SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = Op.getConstantOperandVal(0);
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  Register Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

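// Lower llvm.frameaddress. E.g. __builtin_frame_address(2) copies R30 and
// then follows the saved-FP chain with two loads.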
SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = Op.getConstantOperandVal(0);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         HRI.getFrameRegister(), VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

SDValue
HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}

SDValue
HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  auto *GAN = cast<GlobalAddressSDNode>(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  auto *GV = GAN->getGlobal();
  int64_t Offset = GAN->getOffset();

  auto &HLOF = *HTM.getObjFileLowering();
  Reloc::Model RM = HTM.getRelocationModel();

  if (RM == Reloc::Static) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
    const GlobalObject *GO = GV->getAliaseeObject();
    if (GO && Subtarget.useSmallData() && HLOF.isGlobalInSmallSection(GO, HTM))
      return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, GA);
    return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, GA);
  }

  bool UsePCRel = getTargetMachine().shouldAssumeDSOLocal(GV);
  if (UsePCRel) {
    SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset,
                                            HexagonII::MO_PCREL);
    return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, GA);
  }

  // Use GOT index.
  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, HexagonII::MO_GOT);
  SDValue Off = DAG.getConstant(Offset, dl, MVT::i32);
  return DAG.getNode(HexagonISD::AT_GOT, dl, PtrVT, GOT, GA, Off);
}

SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  Reloc::Model RM = HTM.getRelocationModel();
  if (RM == Reloc::Static) {
    SDValue A = DAG.getTargetBlockAddress(BA, PtrVT);
    return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, A);
  }

  SDValue A = DAG.getTargetBlockAddress(BA, PtrVT, 0, HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, dl, PtrVT, A);
}

SDValue
HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
      const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue GOTSym = DAG.getTargetExternalSymbol(HEXAGON_GOT_SYM_NAME, PtrVT,
                                               HexagonII::MO_PCREL);
  return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Op), PtrVT, GOTSym);
}

SDValue
HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
      GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
      unsigned char OperandFlags) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);
  // Create the operands for the call. The operands should contain:
  //   1. Chain SDValue
  //   2. Callee, which in this case is the global address value.
  //   3. Registers live into the call. In this case it is R0, as we
  //      have just one argument to be passed.
  //   4. Glue.
  // Note: The order is important.

  const auto &HRI = *Subtarget.getRegisterInfo();
  const uint32_t *Mask = HRI.getCallPreservedMask(MF, CallingConv::C);
  assert(Mask && "Missing call preserved mask for calling convention");
  SDValue Ops[] = { Chain, TGA, DAG.getRegister(Hexagon::R0, PtrVT),
                    DAG.getRegisterMask(Mask), Glue };
  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);

  // Inform MFI that the function has calls.
  MFI.setAdjustsStack(true);

  Glue = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
}

//
// Lower using the initial executable model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);

  bool IsPositionIndependent = isPositionIndependent();
  unsigned char TF =
      IsPositionIndependent ? HexagonII::MO_IEGOT : HexagonII::MO_IE;

  // First generate the TLS symbol address.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
                                           Offset, TF);

  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  if (IsPositionIndependent) {
    // Generate the GOT pointer in case of position independent code.
    SDValue GOT = LowerGLOBAL_OFFSET_TABLE(Sym, DAG);

    // Add the TLS symbol address to the GOT pointer. This gives a
    // GOT-relative relocation for the symbol.
    Sym = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);
  }

  // Load the offset of the TLS symbol. This offset is relative to the
  // thread pointer.
  SDValue LoadOffset =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Sym, MachinePointerInfo());

  // The address of the thread-local variable is the thread pointer plus
  // the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, LoadOffset);
}

//
// Lower using the local executable model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Get the thread pointer.
  SDValue TP = DAG.getCopyFromReg(DAG.getEntryNode(), dl, Hexagon::UGP, PtrVT);
  // Generate the TLS symbol address.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
                                           HexagonII::MO_TPREL);
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);

  // The address of the thread-local variable is the thread pointer plus
  // the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, TP, Sym);
}

//
// Lower using the general dynamic model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {
  SDLoc dl(GA);
  int64_t Offset = GA->getOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // First generate the TLS symbol address.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset,
                                           HexagonII::MO_GDGOT);

  // Then, generate the GOT pointer.
  SDValue GOT = LowerGLOBAL_OFFSET_TABLE(TGA, DAG);

  // Add the TLS symbol and the GOT pointer.
  SDValue Sym = DAG.getNode(HexagonISD::CONST32, dl, PtrVT, TGA);
  SDValue Chain = DAG.getNode(ISD::ADD, dl, PtrVT, GOT, Sym);

  // Copy over the argument to R0.
  SDValue InGlue;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, Hexagon::R0, Chain, InGlue);
  InGlue = Chain.getValue(1);

  unsigned Flags = DAG.getSubtarget<HexagonSubtarget>().useLongCalls()
                       ? HexagonII::MO_GDPLT | HexagonII::HMOTF_ConstExtended
                       : HexagonII::MO_GDPLT;

  return GetDynamicTLSAddr(DAG, Chain, GA, InGlue, PtrVT,
                           Hexagon::R0, Flags);
}

//
// Lower TLS addresses.
//
// For now, for dynamic models, we only support the general dynamic model.
//
SDValue
HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
      SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  switch (HTM.getTLSModel(GA->getGlobal())) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
      return LowerToTLSInitialExecModel(GA, DAG);
    case TLSModel::LocalExec:
      return LowerToTLSLocalExecModel(GA, DAG);
  }
  llvm_unreachable("Bogus TLS model");
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
                                             const HexagonSubtarget &ST)
    : TargetLowering(TM), HTM(static_cast<const HexagonTargetMachine&>(TM)),
      Subtarget(ST) {
  auto &HRI = *Subtarget.getRegisterInfo();

  setPrefLoopAlignment(Align(16));
  setMinFunctionAlignment(Align(4));
  setPrefFunctionAlignment(Align(16));
  setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
  setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
  setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);

  setMaxAtomicSizeInBitsSupported(64);
  setMinCmpXchgSizeInBits(32);

  if (EnableHexSDNodeSched)
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Source);

  // Limits for inline expansion of memcpy/memmove
  MaxStoresPerMemcpy = MaxStoresPerMemcpyCL;
  MaxStoresPerMemcpyOptSize = MaxStoresPerMemcpyOptSizeCL;
  MaxStoresPerMemmove = MaxStoresPerMemmoveCL;
  MaxStoresPerMemmoveOptSize = MaxStoresPerMemmoveOptSizeCL;
  MaxStoresPerMemset = MaxStoresPerMemsetCL;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemsetOptSizeCL;

  //
  // Set up register classes.
  //

  addRegisterClass(MVT::i1,    &Hexagon::PredRegsRegClass);
  addRegisterClass(MVT::v2i1,  &Hexagon::PredRegsRegClass);  // bbbbaaaa
  addRegisterClass(MVT::v4i1,  &Hexagon::PredRegsRegClass);  // ddccbbaa
  addRegisterClass(MVT::v8i1,  &Hexagon::PredRegsRegClass);  // hgfedcba
  addRegisterClass(MVT::i32,   &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v2i16, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::v4i8,  &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::i64,   &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v8i8,  &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v4i16, &Hexagon::DoubleRegsRegClass);
  addRegisterClass(MVT::v2i32, &Hexagon::DoubleRegsRegClass);

  addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);

  //
  // Handling of scalar operations.
  //
  // All operations default to "legal", except:
  // - indexed loads and stores (pre-/post-incremented),
  // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS,
  //   ConstantFP, DEBUGTRAP, FCEIL, FCOPYSIGN, FEXP, FEXP2, FFLOOR, FGETSIGN,
  //   FLOG, FLOG2, FLOG10, FMAXNUM, FMINNUM, FNEARBYINT, FRINT, FROUND, TRAP,
  //   FTRUNC, PREFETCH, SIGN_EXTEND_VECTOR_INREG, ZERO_EXTEND_VECTOR_INREG,
  // which default to "expand" for at least one type.
  // Misc operations.
  setOperationAction(ISD::ConstantFP,          MVT::f32,   Legal);
  setOperationAction(ISD::ConstantFP,          MVT::f64,   Legal);
  setOperationAction(ISD::TRAP,                MVT::Other, Legal);
  setOperationAction(ISD::ConstantPool,        MVT::i32,   Custom);
  setOperationAction(ISD::JumpTable,           MVT::i32,   Custom);
  setOperationAction(ISD::BUILD_PAIR,          MVT::i64,   Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG,   MVT::i1,    Expand);
  setOperationAction(ISD::INLINEASM,           MVT::Other, Custom);
  setOperationAction(ISD::INLINEASM_BR,        MVT::Other, Custom);
  setOperationAction(ISD::PREFETCH,            MVT::Other, Custom);
  setOperationAction(ISD::READCYCLECOUNTER,    MVT::i64,   Custom);
  setOperationAction(ISD::INTRINSIC_VOID,      MVT::Other, Custom);
  setOperationAction(ISD::EH_RETURN,           MVT::Other, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32,   Custom);
  setOperationAction(ISD::GlobalTLSAddress,    MVT::i32,   Custom);
  setOperationAction(ISD::ATOMIC_FENCE,        MVT::Other, Custom);

  // Custom legalize GlobalAddress nodes into CONST32.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i8,  Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);

  // Hexagon needs to optimize cases with negative constants.
  setOperationAction(ISD::SETCC, MVT::i8,    Custom);
  setOperationAction(ISD::SETCC, MVT::i16,   Custom);
  setOperationAction(ISD::SETCC, MVT::v4i8,  Custom);
  setOperationAction(ISD::SETCC, MVT::v2i16, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
  setOperationAction(ISD::VAARG,   MVT::Other, Expand);
  if (Subtarget.isEnvironmentMusl())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32,   Custom);

  if (EmitJumpTables)
    setMinimumJumpTableEntries(MinimumJumpTables);
  else
    setMinimumJumpTableEntries(std::numeric_limits<unsigned>::max());
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  for (unsigned LegalIntOp :
       {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) {
    setOperationAction(LegalIntOp, MVT::i32, Legal);
    setOperationAction(LegalIntOp, MVT::i64, Legal);
  }

  // Hexagon has A4_addp_c and A4_subp_c that take and generate a carry bit,
  // but they only operate on i64.
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::UADDO,       VT, Custom);
    setOperationAction(ISD::USUBO,       VT, Custom);
    setOperationAction(ISD::SADDO,       VT, Expand);
    setOperationAction(ISD::SSUBO,       VT, Expand);
    setOperationAction(ISD::UADDO_CARRY, VT, Custom);
    setOperationAction(ISD::USUBO_CARRY, VT, Custom);
  }

  setOperationAction(ISD::CTLZ, MVT::i8,  Promote);
  setOperationAction(ISD::CTLZ, MVT::i16, Promote);
  setOperationAction(ISD::CTTZ, MVT::i8,  Promote);
  setOperationAction(ISD::CTTZ, MVT::i16, Promote);

  // Popcount can count # of 1s in i64 but returns i32.
  setOperationAction(ISD::CTPOP, MVT::i8,  Promote);
  setOperationAction(ISD::CTPOP, MVT::i16, Promote);
  setOperationAction(ISD::CTPOP, MVT::i32, Promote);
  setOperationAction(ISD::CTPOP, MVT::i64, Legal);

  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BSWAP,      MVT::i32, Legal);
  setOperationAction(ISD::BSWAP,      MVT::i64, Legal);

  setOperationAction(ISD::FSHL, MVT::i32, Legal);
  setOperationAction(ISD::FSHL, MVT::i64, Legal);
  setOperationAction(ISD::FSHR, MVT::i32, Legal);
  setOperationAction(ISD::FSHR, MVT::i64, Legal);

  for (unsigned IntExpOp :
       {ISD::SDIV,      ISD::UDIV,      ISD::SREM,      ISD::UREM,
        ISD::SDIVREM,   ISD::UDIVREM,   ISD::ROTL,      ISD::ROTR,
        ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
        ISD::SMUL_LOHI, ISD::UMUL_LOHI}) {
    for (MVT VT : MVT::integer_valuetypes())
      setOperationAction(IntExpOp, VT, Expand);
  }

  for (unsigned FPExpOp :
       {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
        ISD::FPOW, ISD::FCOPYSIGN}) {
    for (MVT VT : MVT::fp_valuetypes())
      setOperationAction(FPExpOp, VT, Expand);
  }

  // No extending loads from i32.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i32, Expand);
  }
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // Turn FP extload into load/fpextend.
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);

  // Expand BR_CC and SELECT_CC for all integer and fp types.
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::BR_CC,     VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  for (MVT VT : MVT::fp_valuetypes()) {
    setOperationAction(ISD::BR_CC,     VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);

  //
  // Handling of vector operations.
  //

  // Set the action for vector operations to "expand", then override it with
  // either "custom" or "legal" for specific cases.
  static const unsigned VectExpOps[] = {
    // Integer arithmetic:
    ISD::ADD,     ISD::SUB,     ISD::MUL,     ISD::SDIV,      ISD::UDIV,
    ISD::SREM,    ISD::UREM,    ISD::SDIVREM, ISD::UDIVREM,   ISD::SADDO,
    ISD::UADDO,   ISD::SSUBO,   ISD::USUBO,   ISD::SMUL_LOHI, ISD::UMUL_LOHI,
    // Logical/bit:
    ISD::AND,     ISD::OR,      ISD::XOR,     ISD::ROTL,      ISD::ROTR,
    ISD::CTPOP,   ISD::CTLZ,    ISD::CTTZ,    ISD::BSWAP,     ISD::BITREVERSE,
    // Floating point arithmetic/math functions:
    ISD::FADD,    ISD::FSUB,    ISD::FMUL,    ISD::FMA,       ISD::FDIV,
    ISD::FREM,    ISD::FNEG,    ISD::FABS,    ISD::FSQRT,     ISD::FSIN,
    ISD::FCOS,    ISD::FPOW,    ISD::FLOG,    ISD::FLOG2,
    ISD::FLOG10,  ISD::FEXP,    ISD::FEXP2,   ISD::FCEIL,     ISD::FTRUNC,
    ISD::FRINT,   ISD::FNEARBYINT,            ISD::FROUND,    ISD::FFLOOR,
    ISD::FMINNUM, ISD::FMAXNUM, ISD::FSINCOS, ISD::FLDEXP,
    // Misc:
    ISD::BR_CC,   ISD::SELECT_CC,             ISD::ConstantPool,
    // Vector:
    ISD::BUILD_VECTOR,       ISD::SCALAR_TO_VECTOR,
    ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT,
    ISD::EXTRACT_SUBVECTOR,  ISD::INSERT_SUBVECTOR,
    ISD::CONCAT_VECTORS,     ISD::VECTOR_SHUFFLE,
    ISD::SPLAT_VECTOR,
  };

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (unsigned VectExpOp : VectExpOps)
      setOperationAction(VectExpOp, VT, Expand);

    // Expand all extending loads and truncating stores:
    for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) {
      if (TargetVT == VT)
        continue;
      setLoadExtAction(ISD::EXTLOAD,  TargetVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, TargetVT, VT, Expand);
      setLoadExtAction(ISD::SEXTLOAD, TargetVT, VT, Expand);
      setTruncStoreAction(VT, TargetVT, Expand);
    }

    // Normalize all inputs to SELECT to be vectors of i32.
    if (VT.getVectorElementType() != MVT::i32) {
      MVT VT32 = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, VT32);
    }
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Extending loads from (native) vectors of i8 into (native) vectors of i16
  // are legal.
  setLoadExtAction(ISD::EXTLOAD,  MVT::v2i16, MVT::v2i8, Legal);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, MVT::v2i8, Legal);
  setLoadExtAction(ISD::EXTLOAD,  MVT::v4i16, MVT::v4i8, Legal);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, MVT::v4i8, Legal);

  // Types natively supported:
  for (MVT NativeVT : {MVT::v8i1, MVT::v4i1, MVT::v2i1, MVT::v4i8,
                       MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
    setOperationAction(ISD::BUILD_VECTOR,       NativeVT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  NativeVT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR,  NativeVT, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR,   NativeVT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS,     NativeVT, Custom);

    setOperationAction(ISD::ADD, NativeVT, Legal);
    setOperationAction(ISD::SUB, NativeVT, Legal);
    setOperationAction(ISD::MUL, NativeVT, Legal);
    setOperationAction(ISD::AND, NativeVT, Legal);
    setOperationAction(ISD::OR,  NativeVT, Legal);
    setOperationAction(ISD::XOR, NativeVT, Legal);

    if (NativeVT.getVectorElementType() != MVT::i1) {
      setOperationAction(ISD::SPLAT_VECTOR, NativeVT, Legal);
    }
  }

  for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32}) {
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
  }

  // Custom lower unaligned loads.
  // Also, for both loads and stores, verify the alignment of the address
  // in case it is a compile-time constant. This is a usability feature to
  // provide a meaningful error message to users.
  for (MVT VT : {MVT::i16, MVT::i32, MVT::v4i8, MVT::i64, MVT::v8i8,
                 MVT::v2i16, MVT::v4i16, MVT::v2i32}) {
    setOperationAction(ISD::LOAD,  VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }

  // Custom-lower load/stores of boolean vectors.
  for (MVT VT : {MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
    setOperationAction(ISD::LOAD,  VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }

  // Normalize integer compares to EQ/GT/UGT
  for (MVT VT : {MVT::v2i16, MVT::v4i8, MVT::v8i8, MVT::v2i32, MVT::v4i16,
                 MVT::v2i32}) {
    setCondCodeAction(ISD::SETNE,  VT, Expand);
    setCondCodeAction(ISD::SETLE,  VT, Expand);
    setCondCodeAction(ISD::SETGE,  VT, Expand);
    setCondCodeAction(ISD::SETLT,  VT, Expand);
    setCondCodeAction(ISD::SETULE, VT, Expand);
    setCondCodeAction(ISD::SETUGE, VT, Expand);
    setCondCodeAction(ISD::SETULT, VT, Expand);
  }

  // Normalize boolean compares to [U]LE/[U]LT
  for (MVT VT : {MVT::i1, MVT::v2i1, MVT::v4i1, MVT::v8i1}) {
    setCondCodeAction(ISD::SETGE,  VT, Expand);
    setCondCodeAction(ISD::SETGT,  VT, Expand);
    setCondCodeAction(ISD::SETUGE, VT, Expand);
    setCondCodeAction(ISD::SETUGT, VT, Expand);
  }

  // Custom-lower bitcasts from i8 to v8i1.
  setOperationAction(ISD::BITCAST,        MVT::i8,    Custom);
  setOperationAction(ISD::SETCC,          MVT::v2i16, Custom);
  setOperationAction(ISD::VSELECT,        MVT::v4i8,  Custom);
  setOperationAction(ISD::VSELECT,        MVT::v2i16, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8,  Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);

  // V5+.
  setOperationAction(ISD::FMA,  MVT::f64, Expand);
  setOperationAction(ISD::FADD, MVT::f64, Expand);
  setOperationAction(ISD::FSUB, MVT::f64, Expand);
  setOperationAction(ISD::FMUL, MVT::f64, Expand);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FP_TO_UINT, MVT::i1,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i1,  Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8,  Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i1,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i1,  Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8,  Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);

  // Special handling for half-precision floating point conversions.
  // Lower half float conversions into library calls.
  setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // Handling of indexed loads/stores: default is "expand".
  //
  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f32, MVT::f64,
                 MVT::v2i16, MVT::v2i32, MVT::v4i8, MVT::v4i16, MVT::v8i8}) {
    setIndexedLoadAction(ISD::POST_INC, VT, Legal);
    setIndexedStoreAction(ISD::POST_INC, VT, Legal);
  }

  // Subtarget-specific operation actions.
  //
  if (Subtarget.hasV60Ops()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
  }
  if (Subtarget.hasV66Ops()) {
    setOperationAction(ISD::FADD, MVT::f64, Legal);
    setOperationAction(ISD::FSUB, MVT::f64, Legal);
  }
  if (Subtarget.hasV67Ops()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMUL,    MVT::f64, Legal);
  }

  if (Subtarget.useHVXOps())
    initializeHVXLowering();

  computeRegisterProperties(&HRI);
1840 //
1841 // Library calls for unsupported operations
1842 //
1843 bool FastMath = EnableFastMath;
1844
1845 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1846 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1847 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1848 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1849 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1850 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1851 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1852 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1853
1854 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1855 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1856 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1857 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1858 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1859 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1860
1861 // This is the only fast library function for sqrtd.
1862 if (FastMath)
1863 setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
1864
1865 // Prefix is: nothing for "slow-math",
1866 // "fast2_" for V5+ fast-math double-precision
1867 // (actually, keep fast-math and fast-math2 separate for now)
1868 if (FastMath) {
1869 setLibcallName(RTLIB::ADD_F64, "__hexagon_fast_adddf3");
1870 setLibcallName(RTLIB::SUB_F64, "__hexagon_fast_subdf3");
1871 setLibcallName(RTLIB::MUL_F64, "__hexagon_fast_muldf3");
1872 setLibcallName(RTLIB::DIV_F64, "__hexagon_fast_divdf3");
1873 setLibcallName(RTLIB::DIV_F32, "__hexagon_fast_divsf3");
1874 } else {
1875 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1876 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1877 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1878 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1879 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1880 }
1881
1882 if (FastMath)
1883 setLibcallName(RTLIB::SQRT_F32, "__hexagon_fast2_sqrtf");
1884 else
1885 setLibcallName(RTLIB::SQRT_F32, "__hexagon_sqrtf");
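// Net effect (illustrative): under fast math an f64 divide lowers to a call
// to __hexagon_fast_divdf3 instead of __hexagon_divdf3; only the square-root
// routines above use the separate "fast2_" flavor.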
1886
1887 // Routines to handle fp16 storage type.
1888 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1889 setLibcallName(RTLIB::FPROUND_F64_F16, "__truncdfhf2");
1890 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1891
1892 // These cause problems when the shift amount is non-constant.
1893 setLibcallName(RTLIB::SHL_I128, nullptr);
1894 setLibcallName(RTLIB::SRL_I128, nullptr);
1895 setLibcallName(RTLIB::SRA_I128, nullptr);
1896}
1897
1898const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1899 switch ((HexagonISD::NodeType)Opcode) {
1900 case HexagonISD::ADDC: return "HexagonISD::ADDC";
1901 case HexagonISD::SUBC: return "HexagonISD::SUBC";
1902 case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
1903 case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
1904 case HexagonISD::AT_PCREL: return "HexagonISD::AT_PCREL";
1905 case HexagonISD::BARRIER: return "HexagonISD::BARRIER";
1906 case HexagonISD::CALL: return "HexagonISD::CALL";
1907 case HexagonISD::CALLnr: return "HexagonISD::CALLnr";
1908 case HexagonISD::CALLR: return "HexagonISD::CALLR";
1909 case HexagonISD::COMBINE: return "HexagonISD::COMBINE";
1910 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
1911 case HexagonISD::CONST32: return "HexagonISD::CONST32";
1912 case HexagonISD::CP: return "HexagonISD::CP";
1913 case HexagonISD::DCFETCH: return "HexagonISD::DCFETCH";
1914 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
1915 case HexagonISD::TSTBIT: return "HexagonISD::TSTBIT";
1916 case HexagonISD::EXTRACTU: return "HexagonISD::EXTRACTU";
1917 case HexagonISD::INSERT: return "HexagonISD::INSERT";
1918 case HexagonISD::JT: return "HexagonISD::JT";
1919 case HexagonISD::RET_GLUE: return "HexagonISD::RET_GLUE";
1920 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
1921 case HexagonISD::VASL: return "HexagonISD::VASL";
1922 case HexagonISD::VASR: return "HexagonISD::VASR";
1923 case HexagonISD::VLSR: return "HexagonISD::VLSR";
1924 case HexagonISD::MFSHL: return "HexagonISD::MFSHL";
1925 case HexagonISD::MFSHR: return "HexagonISD::MFSHR";
1926 case HexagonISD::SSAT: return "HexagonISD::SSAT";
1927 case HexagonISD::USAT: return "HexagonISD::USAT";
1928 case HexagonISD::SMUL_LOHI: return "HexagonISD::SMUL_LOHI";
1929 case HexagonISD::UMUL_LOHI: return "HexagonISD::UMUL_LOHI";
1930 case HexagonISD::USMUL_LOHI: return "HexagonISD::USMUL_LOHI";
1931 case HexagonISD::VEXTRACTW: return "HexagonISD::VEXTRACTW";
1932 case HexagonISD::VINSERTW0: return "HexagonISD::VINSERTW0";
1933 case HexagonISD::VROR: return "HexagonISD::VROR";
1934 case HexagonISD::READCYCLE: return "HexagonISD::READCYCLE";
1935 case HexagonISD::PTRUE: return "HexagonISD::PTRUE";
1936 case HexagonISD::PFALSE: return "HexagonISD::PFALSE";
1937 case HexagonISD::D2P: return "HexagonISD::D2P";
1938 case HexagonISD::P2D: return "HexagonISD::P2D";
1939 case HexagonISD::V2Q: return "HexagonISD::V2Q";
1940 case HexagonISD::Q2V: return "HexagonISD::Q2V";
1941 case HexagonISD::QCAT: return "HexagonISD::QCAT";
1942 case HexagonISD::QTRUE: return "HexagonISD::QTRUE";
1943 case HexagonISD::QFALSE: return "HexagonISD::QFALSE";
1944 case HexagonISD::TL_EXTEND: return "HexagonISD::TL_EXTEND";
1945 case HexagonISD::TL_TRUNCATE: return "HexagonISD::TL_TRUNCATE";
1946 case HexagonISD::TYPECAST: return "HexagonISD::TYPECAST";
1947 case HexagonISD::VALIGN: return "HexagonISD::VALIGN";
1948 case HexagonISD::VALIGNADDR: return "HexagonISD::VALIGNADDR";
1949 case HexagonISD::ISEL: return "HexagonISD::ISEL";
1950 case HexagonISD::OP_END: break;
1951 }
1952 return nullptr;
1953}
1954
1955bool
1956HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign,
1957 const SDLoc &dl, SelectionDAG &DAG) const {
1958 auto *CA = dyn_cast<ConstantSDNode>(Ptr);
1959 if (!CA)
1960 return true;
1961 unsigned Addr = CA->getZExtValue();
1962 Align HaveAlign =
1963 Addr != 0 ? Align(1ull << llvm::countr_zero(Addr)) : NeedAlign;
1964 if (HaveAlign >= NeedAlign)
1965 return true;
1966
1967 static int DK_MisalignedTrap = llvm::getNextAvailablePluginDiagnosticKind();
1968
1969 struct DiagnosticInfoMisalignedTrap : public DiagnosticInfo {
1970 DiagnosticInfoMisalignedTrap(StringRef M)
1971 : DiagnosticInfo(DK_MisalignedTrap, DS_Remark), Msg(M) {}
1972 void print(DiagnosticPrinter &DP) const override {
1973 DP << Msg;
1974 }
1975 static bool classof(const DiagnosticInfo *DI) {
1976 return DI->getKind() == DK_MisalignedTrap;
1977 }
1978 StringRef Msg;
1979 };
1980
1981 std::string ErrMsg;
1982 raw_string_ostream O(ErrMsg);
1983 O << "Misaligned constant address: " << format_hex(Addr, 10)
1984 << " has alignment " << HaveAlign.value()
1985 << ", but the memory access requires " << NeedAlign.value();
1986 if (DebugLoc DL = dl.getDebugLoc())
1987 DL.print(O << ", at ");
1988 O << ". The instruction has been replaced with a trap.";
1989
1990 DAG.getContext()->diagnose(DiagnosticInfoMisalignedTrap(O.str()));
1991 return false;
1992}
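// Worked example for the check above (illustrative): a 4-byte access to the
// constant address 0x12 has countr_zero(0x12) == 1, i.e. HaveAlign = 2, which
// is below NeedAlign = 4, so the remark is emitted and the caller replaces
// the access with a trap (see replaceMemWithUndef below).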
1993
1994SDValue
1995HexagonTargetLowering::replaceMemWithUndef(SDValue Op, SelectionDAG &DAG)
1996 const {
1997 const SDLoc &dl(Op);
1998 auto *LS = cast<LSBaseSDNode>(Op.getNode());
1999 assert(!LS->isIndexed() && "Not expecting indexed ops on constant address");
2000
2001 SDValue Chain = LS->getChain();
2002 SDValue Trap = DAG.getNode(ISD::TRAP, dl, MVT::Other, Chain);
2003 if (LS->getOpcode() == ISD::LOAD)
2004 return DAG.getMergeValues({DAG.getUNDEF(ty(Op)), Trap}, dl);
2005 return Trap;
2006}
2007
2008 // Bit-reverse Load Intrinsic: Check if the instruction is a bit-reverse load
2009// intrinsic.
2010static bool isBrevLdIntrinsic(const Value *Inst) {
2011 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
2012 return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
2013 ID == Intrinsic::hexagon_L2_loadri_pbr ||
2014 ID == Intrinsic::hexagon_L2_loadrh_pbr ||
2015 ID == Intrinsic::hexagon_L2_loadruh_pbr ||
2016 ID == Intrinsic::hexagon_L2_loadrb_pbr ||
2017 ID == Intrinsic::hexagon_L2_loadrub_pbr);
2018}
2019
2020 // Bit-reverse Load Intrinsic: Crawl up and figure out the object from a
2021 // previous instruction. So far we only handle bitcast, extractvalue, and
2022 // bit-reverse load intrinsic instructions. Should we handle CGEP?
2023 static Value *getBrevLdObject(Value *V) {
2024 if (Operator::getOpcode(V) == Instruction::ExtractValue ||
2025 Operator::getOpcode(V) == Instruction::BitCast)
2026 V = cast<Operator>(V)->getOperand(0);
2027 else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
2028 V = cast<Instruction>(V)->getOperand(0);
2029 return V;
2030}
2031
2032 // Bit-reverse Load Intrinsic: For a PHI node, return either an incoming edge or
2033// a back edge. If the back edge comes from the intrinsic itself, the incoming
2034// edge is returned.
2035static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
2036 const BasicBlock *Parent = PN->getParent();
2037 int Idx = -1;
2038 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
2039 BasicBlock *Blk = PN->getIncomingBlock(i);
2040 // Determine if the back edge originates from the intrinsic.
2041 if (Blk == Parent) {
2042 Value *BackEdgeVal = PN->getIncomingValue(i);
2043 Value *BaseVal;
2044 // Loop until we return the same Value or we hit IntrBaseVal.
2045 do {
2046 BaseVal = BackEdgeVal;
2047 BackEdgeVal = getBrevLdObject(BackEdgeVal);
2048 } while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
2049 // If getBrevLdObject returns IntrBaseVal, we should return the
2050 // incoming edge.
2051 if (IntrBaseVal == BackEdgeVal)
2052 continue;
2053 Idx = i;
2054 break;
2055 } else // Otherwise, record this incoming edge.
2056 Idx = i;
2057 }
2058 assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
2059 return PN->getIncomingValue(Idx);
2060}
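// In a typical bit-reverse loop the address PHI looks roughly like
//   %ptr = phi ptr [ %base, %entry ], [ %next, %loop ]
// where %next is extracted from the intrinsic's own result; in that case the
// back edge leads back to the intrinsic, and returnEdge resolves to %base.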
2061
2062// Bit-reverse Load Intrinsic: Figure out the underlying object the base
2063// pointer points to, for the bit-reverse load intrinsic. Setting this to
2064 // the memoperand might help alias analysis figure out the dependencies.
2065 static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
2066 Value *IntrBaseVal = V;
2067 Value *BaseVal;
2068 // Loop until getBrevLdObject returns the same Value: at that point we have
2069 // either found the object or hit a PHI.
2070 do {
2071 BaseVal = V;
2072 V = getBrevLdObject(V);
2073 } while (BaseVal != V);
2074
2075 // Identify the object from PHINode.
2076 if (const PHINode *PN = dyn_cast<PHINode>(V))
2077 return returnEdge(PN, IntrBaseVal);
2078 // For non-PHI nodes, the object is the last value returned by getBrevLdObject.
2079 else
2080 return V;
2081}
2082
2083/// Given an intrinsic, checks if on the target the intrinsic will need to map
2084/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
2085 /// true and stores the intrinsic information into the IntrinsicInfo that was
2086 /// passed to the function.
2087 bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
2088 const CallInst &I,
2089 MachineFunction &MF,
2090 unsigned Intrinsic) const {
2091 switch (Intrinsic) {
2092 case Intrinsic::hexagon_L2_loadrd_pbr:
2093 case Intrinsic::hexagon_L2_loadri_pbr:
2094 case Intrinsic::hexagon_L2_loadrh_pbr:
2095 case Intrinsic::hexagon_L2_loadruh_pbr:
2096 case Intrinsic::hexagon_L2_loadrb_pbr:
2097 case Intrinsic::hexagon_L2_loadrub_pbr: {
2098 Info.opc = ISD::INTRINSIC_W_CHAIN;
2099 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
2100 auto &Cont = I.getCalledFunction()->getParent()->getContext();
2101 // The intrinsic function call is of the form { ElTy, i8* }
2102 // @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
2103 // should be derived from ElTy.
2104 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0);
2105 Info.memVT = MVT::getVT(ElTy);
2106 llvm::Value *BasePtrVal = I.getOperand(0);
2107 Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
2108 // The offset value comes through the Modifier register. For now, assume the
2109 // offset is 0.
2110 Info.offset = 0;
2111 Info.align = DL.getABITypeAlign(Info.memVT.getTypeForEVT(Cont));
2112 Info.flags = MachineMemOperand::MOLoad;
2113 return true;
2114 }
2115 case Intrinsic::hexagon_V6_vgathermw:
2116 case Intrinsic::hexagon_V6_vgathermw_128B:
2117 case Intrinsic::hexagon_V6_vgathermh:
2118 case Intrinsic::hexagon_V6_vgathermh_128B:
2119 case Intrinsic::hexagon_V6_vgathermhw:
2120 case Intrinsic::hexagon_V6_vgathermhw_128B:
2121 case Intrinsic::hexagon_V6_vgathermwq:
2122 case Intrinsic::hexagon_V6_vgathermwq_128B:
2123 case Intrinsic::hexagon_V6_vgathermhq:
2124 case Intrinsic::hexagon_V6_vgathermhq_128B:
2125 case Intrinsic::hexagon_V6_vgathermhwq:
2126 case Intrinsic::hexagon_V6_vgathermhwq_128B: {
2127 const Module &M = *I.getParent()->getParent()->getParent();
2128 Info.opc = ISD::INTRINSIC_W_CHAIN;
2129 Type *VecTy = I.getArgOperand(1)->getType();
2130 Info.memVT = MVT::getVT(VecTy);
2131 Info.ptrVal = I.getArgOperand(0);
2132 Info.offset = 0;
2133 Info.align =
2134 MaybeAlign(M.getDataLayout().getTypeAllocSizeInBits(VecTy) / 8);
2135 Info.flags = MachineMemOperand::MOLoad |
2136 MachineMemOperand::MOStore |
2137 MachineMemOperand::MOVolatile;
2138 return true;
2139 }
2140 default:
2141 break;
2142 }
2143 return false;
2144}
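// E.g. for { i64, ptr } @llvm.hexagon.L2.loadrd.pbr(ptr, i32), the fields
// filled in above are roughly: memVT = i64, ptrVal = the underlying object of
// the pointer argument, offset = 0, and the ABI alignment of i64.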
2145
2146 bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
2147 return X.getValueType().isScalarInteger(); // 'tstbit'
2148}
2149
2150 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
2151 return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
2152}
2153
2154 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
2155 if (!VT1.isSimple() || !VT2.isSimple())
2156 return false;
2157 return VT1.getSimpleVT() == MVT::i64 && VT2.getSimpleVT() == MVT::i32;
2158}
2159
2160 bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(
2161 const MachineFunction &MF, EVT VT) const {
2162 return isOperationLegalOrCustom(ISD::FMA, VT);
2163 }
2164
2165// Should we expand the build vector with shuffles?
2166 bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
2167 unsigned DefinedValues) const {
2168 return false;
2169}
2170
2171 bool HexagonTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
2172 unsigned Index) const {
2173 assert(ResVT.getVectorElementType() == SrcVT.getVectorElementType());
2174 if (!ResVT.isSimple() || !SrcVT.isSimple())
2175 return false;
2176
2177 MVT ResTy = ResVT.getSimpleVT(), SrcTy = SrcVT.getSimpleVT();
2178 if (ResTy.getVectorElementType() != MVT::i1)
2179 return true;
2180
2181 // Non-HVX bool vectors are relatively cheap.
2182 return SrcTy.getVectorNumElements() <= 8;
2183}
2184
2185 bool HexagonTargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
2186 return Op.getOpcode() == ISD::CONCAT_VECTORS ||
2187 TargetLowering::isTargetCanonicalConstantNode(Op);
2188 }
2189
2190 bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
2191 EVT VT) const {
2192 return true;
2193}
2194
2195 TargetLoweringBase::LegalizeTypeAction
2196 HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
2197 unsigned VecLen = VT.getVectorMinNumElements();
2198 MVT ElemTy = VT.getVectorElementType();
2199
2200 if (VecLen == 1 || VT.isScalableVector())
2201 return TargetLoweringBase::TypeScalarizeVector;
2202
2203 if (Subtarget.useHVXOps()) {
2204 unsigned Action = getPreferredHvxVectorAction(VT);
2205 if (Action != ~0u)
2206 return static_cast<TargetLoweringBase::LegalizeTypeAction>(Action);
2207 }
2208
2209 // Always widen (remaining) vectors of i1.
2210 if (ElemTy == MVT::i1)
2211 return TargetLoweringBase::TypeWidenVector;
2212 // Widen non-power-of-2 vectors. Such types cannot be split right now,
2213 // and computeRegisterProperties will override "split" with "widen",
2214 // which can cause other issues.
2215 if (!isPowerOf2_32(VecLen))
2216 return TargetLoweringBase::TypeWidenVector;
2217
2218 return TargetLoweringBase::TypeSplitVector;
2219 }
2220
2221 TargetLoweringBase::LegalizeAction
2222 HexagonTargetLowering::getCustomOperationAction(SDNode &Op) const {
2223 if (Subtarget.useHVXOps()) {
2224 unsigned Action = getCustomHvxOperationAction(Op);
2225 if (Action != ~0u)
2226 return static_cast<TargetLoweringBase::LegalizeAction>(Action);
2227 }
2228 return TargetLoweringBase::Legal;
2229 }
2230
2231std::pair<SDValue, int>
2232HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {
2233 if (Addr.getOpcode() == ISD::ADD) {
2234 SDValue Op1 = Addr.getOperand(1);
2235 if (auto *CN = dyn_cast<const ConstantSDNode>(Op1.getNode()))
2236 return { Addr.getOperand(0), CN->getSExtValue() };
2237 }
2238 return { Addr, 0 };
2239}
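// E.g. an address of the form (add R, 40) decomposes into { R, 40 }; any
// other shape, including one with a non-constant offset, is returned as
// { Addr, 0 }.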
2240
2241// Lower a vector shuffle (V1, V2, V3). V1 and V2 are the two vectors
2242// to select data from, V3 is the permutation.
2243 SDValue
2244 HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
2245 const {
2246 const auto *SVN = cast<ShuffleVectorSDNode>(Op);
2247 ArrayRef<int> AM = SVN->getMask();
2248 assert(AM.size() <= 8 && "Unexpected shuffle mask");
2249 unsigned VecLen = AM.size();
2250
2251 MVT VecTy = ty(Op);
2252 assert(!Subtarget.isHVXVectorType(VecTy, true) &&
2253 "HVX shuffles should be legal");
2254 assert(VecTy.getSizeInBits() <= 64 && "Unexpected vector length");
2255
2256 SDValue Op0 = Op.getOperand(0);
2257 SDValue Op1 = Op.getOperand(1);
2258 const SDLoc &dl(Op);
2259
2260 // If the inputs are not the same as the output, bail. This is not an
2261 // error situation, but complicates the handling and the default expansion
2262 // (into BUILD_VECTOR) should be adequate.
2263 if (ty(Op0) != VecTy || ty(Op1) != VecTy)
2264 return SDValue();
2265
2266 // Normalize the mask so that the first non-negative index comes from
2267 // the first operand.
2268 SmallVector<int,8> Mask(AM.begin(), AM.end());
2269 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data();
2270 if (F == AM.size())
2271 return DAG.getUNDEF(VecTy);
2272 if (AM[F] >= int(VecLen)) {
2273 ShuffleVectorSDNode::commuteShuffleMask(Mask, VecLen);
2274 std::swap(Op0, Op1);
2275 }
2276
2277 // Express the shuffle mask in terms of bytes.
2278 SmallVector<int,8> ByteMask;
2279 unsigned ElemBytes = VecTy.getVectorElementType().getSizeInBits() / 8;
2280 for (int M : Mask) {
2281 if (M < 0) {
2282 for (unsigned j = 0; j != ElemBytes; ++j)
2283 ByteMask.push_back(-1);
2284 } else {
2285 for (unsigned j = 0; j != ElemBytes; ++j)
2286 ByteMask.push_back(M*ElemBytes + j);
2287 }
2288 }
2289 assert(ByteMask.size() <= 8);
2290
2291 // All non-undef (non-negative) indexes are well within [0..127], so they
2292 // fit in a single byte. Build two 64-bit words:
2293 // - MaskIdx where each byte is the corresponding index (for non-negative
2294 // indexes), and 0xFF for negative indexes, and
2295 // - MaskUnd that has 0xFF for each negative index.
2296 uint64_t MaskIdx = 0;
2297 uint64_t MaskUnd = 0;
2298 for (unsigned i = 0, e = ByteMask.size(); i != e; ++i) {
2299 unsigned S = 8*i;
2300 uint64_t M = ByteMask[i] & 0xFF;
2301 if (M == 0xFF)
2302 MaskUnd |= M << S;
2303 MaskIdx |= M << S;
2304 }
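// Example: a v4i8 shuffle with mask <0, 2, 4, 6> (even bytes of the two
// inputs) gives ByteMask = {0, 2, 4, 6}, hence MaskIdx = 0x06040200 and
// MaskUnd = 0, which is recognized as the S2_vtrunehb pattern below.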
2305
2306 if (ByteMask.size() == 4) {
2307 // Identity.
2308 if (MaskIdx == (0x03020100 | MaskUnd))
2309 return Op0;
2310 // Byte swap.
2311 if (MaskIdx == (0x00010203 | MaskUnd)) {
2312 SDValue T0 = DAG.getBitcast(MVT::i32, Op0);
2313 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i32, T0);
2314 return DAG.getBitcast(VecTy, T1);
2315 }
2316
2317 // Byte packs.
2318 SDValue Concat10 =
2319 getCombine(Op1, Op0, dl, typeJoin({ty(Op1), ty(Op0)}), DAG);
2320 if (MaskIdx == (0x06040200 | MaskUnd))
2321 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat10}, DAG);
2322 if (MaskIdx == (0x07050301 | MaskUnd))
2323 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat10}, DAG);
2324
2325 SDValue Concat01 =
2326 getCombine(Op0, Op1, dl, typeJoin({ty(Op0), ty(Op1)}), DAG);
2327 if (MaskIdx == (0x02000604 | MaskUnd))
2328 return getInstr(Hexagon::S2_vtrunehb, dl, VecTy, {Concat01}, DAG);
2329 if (MaskIdx == (0x03010705 | MaskUnd))
2330 return getInstr(Hexagon::S2_vtrunohb, dl, VecTy, {Concat01}, DAG);
2331 }
2332
2333 if (ByteMask.size() == 8) {
2334 // Identity.
2335 if (MaskIdx == (0x0706050403020100ull | MaskUnd))
2336 return Op0;
2337 // Byte swap.
2338 if (MaskIdx == (0x0001020304050607ull | MaskUnd)) {
2339 SDValue T0 = DAG.getBitcast(MVT::i64, Op0);
2340 SDValue T1 = DAG.getNode(ISD::BSWAP, dl, MVT::i64, T0);
2341 return DAG.getBitcast(VecTy, T1);
2342 }
2343
2344 // Halfword picks.
2345 if (MaskIdx == (0x0d0c050409080100ull | MaskUnd))
2346 return getInstr(Hexagon::S2_shuffeh, dl, VecTy, {Op1, Op0}, DAG);
2347 if (MaskIdx == (0x0f0e07060b0a0302ull | MaskUnd))
2348 return getInstr(Hexagon::S2_shuffoh, dl, VecTy, {Op1, Op0}, DAG);
2349 if (MaskIdx == (0x0d0c090805040100ull | MaskUnd))
2350 return getInstr(Hexagon::S2_vtrunewh, dl, VecTy, {Op1, Op0}, DAG);
2351 if (MaskIdx == (0x0f0e0b0a07060302ull | MaskUnd))
2352 return getInstr(Hexagon::S2_vtrunowh, dl, VecTy, {Op1, Op0}, DAG);
2353 if (MaskIdx == (0x0706030205040100ull | MaskUnd)) {
2354 VectorPair P = opSplit(Op0, dl, DAG);
2355 return getInstr(Hexagon::S2_packhl, dl, VecTy, {P.second, P.first}, DAG);
2356 }
2357
2358 // Byte packs.
2359 if (MaskIdx == (0x0e060c040a020800ull | MaskUnd))
2360 return getInstr(Hexagon::S2_shuffeb, dl, VecTy, {Op1, Op0}, DAG);
2361 if (MaskIdx == (0x0f070d050b030901ull | MaskUnd))
2362 return getInstr(Hexagon::S2_shuffob, dl, VecTy, {Op1, Op0}, DAG);
2363 }
2364
2365 return SDValue();
2366}
2367
2368SDValue
2369HexagonTargetLowering::getSplatValue(SDValue Op, SelectionDAG &DAG) const {
2370 switch (Op.getOpcode()) {
2371 case ISD::BUILD_VECTOR:
2372 if (SDValue S = cast<BuildVectorSDNode>(Op)->getSplatValue())
2373 return S;
2374 break;
2375 case ISD::SPLAT_VECTOR:
2376 return Op.getOperand(0);
2377 }
2378 return SDValue();
2379}
2380
2381// Create a Hexagon-specific node for shifting a vector by an integer.
2382SDValue
2383HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
2384 const {
2385 unsigned NewOpc;
2386 switch (Op.getOpcode()) {
2387 case ISD::SHL:
2388 NewOpc = HexagonISD::VASL;
2389 break;
2390 case ISD::SRA:
2391 NewOpc = HexagonISD::VASR;
2392 break;
2393 case ISD::SRL:
2394 NewOpc = HexagonISD::VLSR;
2395 break;
2396 default:
2397 llvm_unreachable("Unexpected shift opcode");
2398 }
2399
2400 if (SDValue Sp = getSplatValue(Op.getOperand(1), DAG))
2401 return DAG.getNode(NewOpc, SDLoc(Op), ty(Op), Op.getOperand(0), Sp);
2402 return SDValue();
2403}
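// E.g. (shl v2i16 V, (splat 3)) becomes (VASL V, 3). If the shift amount is
// not a splat, no node is built and the caller falls back to expansion.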
2404
2405 SDValue
2406 HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {
2407 const SDLoc &dl(Op);
2408
2409 // First try to convert the shift (by vector) to a shift by a scalar.
2410 // If we first split the shift, the shift amount will become 'extract
2411 // subvector', and will no longer be recognized as scalar.
2412 SDValue Res = Op;
2413 if (SDValue S = getVectorShiftByInt(Op, DAG))
2414 Res = S;
2415
2416 unsigned Opc = Res.getOpcode();
2417 switch (Opc) {
2418 case HexagonISD::VASR:
2419 case HexagonISD::VLSR:
2420 case HexagonISD::VASL:
2421 break;
2422 default:
2423 // No instructions for shifts by non-scalars.
2424 return SDValue();
2425 }
2426
2427 MVT ResTy = ty(Res);
2428 if (ResTy.getVectorElementType() != MVT::i8)
2429 return Res;
2430
2431 // For shifts of i8, extend the inputs to i16, then truncate back to i8.
2432 assert(ResTy.getVectorElementType() == MVT::i8);
2433 SDValue Val = Res.getOperand(0), Amt = Res.getOperand(1);
2434
2435 auto ShiftPartI8 = [&dl, &DAG, this](unsigned Opc, SDValue V, SDValue A) {
2436 MVT Ty = ty(V);
2437 MVT ExtTy = MVT::getVectorVT(MVT::i16, Ty.getVectorNumElements());
2438 SDValue ExtV = Opc == HexagonISD::VASR ? DAG.getSExtOrTrunc(V, dl, ExtTy)
2439 : DAG.getZExtOrTrunc(V, dl, ExtTy);
2440 SDValue ExtS = DAG.getNode(Opc, dl, ExtTy, {ExtV, A});
2441 return DAG.getZExtOrTrunc(ExtS, dl, Ty);
2442 };
2443
2444 if (ResTy.getSizeInBits() == 32)
2445 return ShiftPartI8(Opc, Val, Amt);
2446
2447 auto [LoV, HiV] = opSplit(Val, dl, DAG);
2448 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResTy,
2449 {ShiftPartI8(Opc, LoV, Amt), ShiftPartI8(Opc, HiV, Amt)});
2450}
2451
2452 SDValue
2453 HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
2454 if (isa<ConstantSDNode>(Op.getOperand(1).getNode()))
2455 return Op;
2456 return SDValue();
2457}
2458
2459 SDValue
2460 HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2461 MVT ResTy = ty(Op);
2462 SDValue InpV = Op.getOperand(0);
2463 MVT InpTy = ty(InpV);
2464 assert(ResTy.getSizeInBits() == InpTy.getSizeInBits());
2465 const SDLoc &dl(Op);
2466
2467 // Handle conversion from i8 to v8i1.
2468 if (InpTy == MVT::i8) {
2469 if (ResTy == MVT::v8i1) {
2470 SDValue Sc = DAG.getBitcast(tyScalar(InpTy), InpV);
2471 SDValue Ext = DAG.getZExtOrTrunc(Sc, dl, MVT::i32);
2472 return getInstr(Hexagon::C2_tfrrp, dl, ResTy, Ext, DAG);
2473 }
2474 return SDValue();
2475 }
2476
2477 return Op;
2478}
2479
2480bool
2481HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
2482 MVT VecTy, SelectionDAG &DAG,
2483 MutableArrayRef<ConstantInt*> Consts) const {
2484 MVT ElemTy = VecTy.getVectorElementType();
2485 unsigned ElemWidth = ElemTy.getSizeInBits();
2486 IntegerType *IntTy = IntegerType::get(*DAG.getContext(), ElemWidth);
2487 bool AllConst = true;
2488
2489 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2490 SDValue V = Values[i];
2491 if (V.isUndef()) {
2492 Consts[i] = ConstantInt::get(IntTy, 0);
2493 continue;
2494 }
2495 // Make sure to always cast to IntTy.
2496 if (auto *CN = dyn_cast<ConstantSDNode>(V.getNode())) {
2497 const ConstantInt *CI = CN->getConstantIntValue();
2498 Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue());
2499 } else if (auto *CN = dyn_cast<ConstantFPSDNode>(V.getNode())) {
2500 const ConstantFP *CF = CN->getConstantFPValue();
2501 APInt A = CF->getValueAPF().bitcastToAPInt();
2502 Consts[i] = ConstantInt::get(IntTy, A.getZExtValue());
2503 } else {
2504 AllConst = false;
2505 }
2506 }
2507 return AllConst;
2508}
2509
2510SDValue
2511HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
2512 MVT VecTy, SelectionDAG &DAG) const {
2513 MVT ElemTy = VecTy.getVectorElementType();
2514 assert(VecTy.getVectorNumElements() == Elem.size());
2515
2516 SmallVector<ConstantInt*,4> Consts(Elem.size());
2517 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2518
2519 unsigned First, Num = Elem.size();
2520 for (First = 0; First != Num; ++First) {
2521 if (!isUndef(Elem[First]))
2522 break;
2523 }
2524 if (First == Num)
2525 return DAG.getUNDEF(VecTy);
2526
2527 if (AllConst &&
2528 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2529 return getZero(dl, VecTy, DAG);
2530
2531 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2532 assert(Elem.size() == 2);
2533 if (AllConst) {
2534 // The 'Consts' array will have all values as integers regardless
2535 // of the vector element type.
2536 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) |
2537 Consts[1]->getZExtValue() << 16;
2538 return DAG.getBitcast(VecTy, DAG.getConstant(V, dl, MVT::i32));
2539 }
2540 SDValue E0, E1;
2541 if (ElemTy == MVT::f16) {
2542 E0 = DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Elem[0]), dl, MVT::i32);
2543 E1 = DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Elem[1]), dl, MVT::i32);
2544 } else {
2545 E0 = Elem[0];
2546 E1 = Elem[1];
2547 }
2548 SDValue N = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {E1, E0}, DAG);
2549 return DAG.getBitcast(VecTy, N);
2550 }
2551
2552 if (ElemTy == MVT::i8) {
2553 // First try generating a constant.
2554 if (AllConst) {
2555 int32_t V = (Consts[0]->getZExtValue() & 0xFF) |
2556 (Consts[1]->getZExtValue() & 0xFF) << 8 |
2557 (Consts[2]->getZExtValue() & 0xFF) << 16 |
2558 Consts[3]->getZExtValue() << 24;
2559 return DAG.getBitcast(MVT::v4i8, DAG.getConstant(V, dl, MVT::i32));
2560 }
2561
2562 // Then try splat.
2563 bool IsSplat = true;
2564 for (unsigned i = First+1; i != Num; ++i) {
2565 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2566 continue;
2567 IsSplat = false;
2568 break;
2569 }
2570 if (IsSplat) {
2571 // Legalize the operand of SPLAT_VECTOR.
2572 SDValue Ext = DAG.getZExtOrTrunc(Elem[First], dl, MVT::i32);
2573 return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext);
2574 }
2575
2576 // Generate
2577 // (zxtb(Elem[0]) | (zxtb(Elem[1]) << 8)) |
2578 // (zxtb(Elem[2]) | (zxtb(Elem[3]) << 8)) << 16
2579 assert(Elem.size() == 4);
2580 SDValue Vs[4];
2581 for (unsigned i = 0; i != 4; ++i) {
2582 Vs[i] = DAG.getZExtOrTrunc(Elem[i], dl, MVT::i32);
2583 Vs[i] = DAG.getZeroExtendInReg(Vs[i], dl, MVT::i8);
2584 }
2585 SDValue S8 = DAG.getConstant(8, dl, MVT::i32);
2586 SDValue T0 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[1], S8});
2587 SDValue T1 = DAG.getNode(ISD::SHL, dl, MVT::i32, {Vs[3], S8});
2588 SDValue B0 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[0], T0});
2589 SDValue B1 = DAG.getNode(ISD::OR, dl, MVT::i32, {Vs[2], T1});
2590
2591 SDValue R = getInstr(Hexagon::A2_combine_ll, dl, MVT::i32, {B1, B0}, DAG);
2592 return DAG.getBitcast(MVT::v4i8, R);
2593 }
2594
2595#ifndef NDEBUG
2596 dbgs() << "VecTy: " << VecTy << '\n';
2597#endif
2598 llvm_unreachable("Unexpected vector element type");
2599}
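// E.g. the constant v4i8 <1, 2, 3, 4> becomes the 32-bit immediate
// 0x04030201 above, while a uniform non-constant input becomes SPLAT_VECTOR.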
2600
2601SDValue
2602HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
2603 MVT VecTy, SelectionDAG &DAG) const {
2604 MVT ElemTy = VecTy.getVectorElementType();
2605 assert(VecTy.getVectorNumElements() == Elem.size());
2606
2607 SmallVector<ConstantInt*,8> Consts(Elem.size());
2608 bool AllConst = getBuildVectorConstInts(Elem, VecTy, DAG, Consts);
2609
2610 unsigned First, Num = Elem.size();
2611 for (First = 0; First != Num; ++First) {
2612 if (!isUndef(Elem[First]))
2613 break;
2614 }
2615 if (First == Num)
2616 return DAG.getUNDEF(VecTy);
2617
2618 if (AllConst &&
2619 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); }))
2620 return getZero(dl, VecTy, DAG);
2621
2622 // First try splat if possible.
2623 if (ElemTy == MVT::i16 || ElemTy == MVT::f16) {
2624 bool IsSplat = true;
2625 for (unsigned i = First+1; i != Num; ++i) {
2626 if (Elem[i] == Elem[First] || isUndef(Elem[i]))
2627 continue;
2628 IsSplat = false;
2629 break;
2630 }
2631 if (IsSplat) {
2632 // Legalize the operand of SPLAT_VECTOR
2633 SDValue S = ElemTy == MVT::f16 ? DAG.getBitcast(MVT::i16, Elem[First])
2634 : Elem[First];
2635 SDValue Ext = DAG.getZExtOrTrunc(S, dl, MVT::i32);
2636 return DAG.getNode(ISD::SPLAT_VECTOR, dl, VecTy, Ext);
2637 }
2638 }
2639
2640 // Then try constant.
2641 if (AllConst) {
2642 uint64_t Val = 0;
2643 unsigned W = ElemTy.getSizeInBits();
2644 uint64_t Mask = (1ull << W) - 1;
2645 for (unsigned i = 0; i != Num; ++i)
2646 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask);
2647 SDValue V0 = DAG.getConstant(Val, dl, MVT::i64);
2648 return DAG.getBitcast(VecTy, V0);
2649 }
2650
2651 // Build two 32-bit vectors and concatenate.
2652 MVT HalfTy = MVT::getVectorVT(ElemTy, Num/2);
2653 SDValue L = (ElemTy == MVT::i32)
2654 ? Elem[0]
2655 : buildVector32(Elem.take_front(Num/2), dl, HalfTy, DAG);
2656 SDValue H = (ElemTy == MVT::i32)
2657 ? Elem[1]
2658 : buildVector32(Elem.drop_front(Num/2), dl, HalfTy, DAG);
2659 return getCombine(H, L, dl, VecTy, DAG);
2660}
2661
2662SDValue
2663HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
2664 const SDLoc &dl, MVT ValTy, MVT ResTy,
2665 SelectionDAG &DAG) const {
2666 MVT VecTy = ty(VecV);
2667 assert(!ValTy.isVector() ||
2668 VecTy.getVectorElementType() == ValTy.getVectorElementType());
2669 if (VecTy.getVectorElementType() == MVT::i1)
2670 return extractVectorPred(VecV, IdxV, dl, ValTy, ResTy, DAG);
2671
2672 unsigned VecWidth = VecTy.getSizeInBits();
2673 unsigned ValWidth = ValTy.getSizeInBits();
2674 unsigned ElemWidth = VecTy.getVectorElementType().getSizeInBits();
2675 assert((VecWidth % ElemWidth) == 0);
2676 assert(VecWidth == 32 || VecWidth == 64);
2677
2678 // Cast everything to scalar integer types.
2679 MVT ScalarTy = tyScalar(VecTy);
2680 VecV = DAG.getBitcast(ScalarTy, VecV);
2681
2682 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2683 SDValue ExtV;
2684
2685 if (auto *IdxN = dyn_cast<ConstantSDNode>(IdxV)) {
2686 unsigned Off = IdxN->getZExtValue() * ElemWidth;
2687 if (VecWidth == 64 && ValWidth == 32) {
2688 assert(Off == 0 || Off == 32);
2689 ExtV = Off == 0 ? LoHalf(VecV, DAG) : HiHalf(VecV, DAG);
2690 } else if (Off == 0 && (ValWidth % 8) == 0) {
2691 ExtV = DAG.getZeroExtendInReg(VecV, dl, tyScalar(ValTy));
2692 } else {
2693 SDValue OffV = DAG.getConstant(Off, dl, MVT::i32);
2694 // The return type of EXTRACTU must be the same as the type of the
2695 // input vector.
2696 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2697 {VecV, WidthV, OffV});
2698 }
2699 } else {
2700 if (ty(IdxV) != MVT::i32)
2701 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2702 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2703 DAG.getConstant(ElemWidth, dl, MVT::i32));
2704 ExtV = DAG.getNode(HexagonISD::EXTRACTU, dl, ScalarTy,
2705 {VecV, WidthV, OffV});
2706 }
2707
2708 // Cast ExtV to the requested result type.
2709 ExtV = DAG.getZExtOrTrunc(ExtV, dl, tyScalar(ResTy));
2710 ExtV = DAG.getBitcast(ResTy, ExtV);
2711 return ExtV;
2712}
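// E.g. extracting element 1 of a v2i32 kept in a 64-bit register reduces to
// taking the high 32-bit subregister; other (constant or variable) offsets
// go through the EXTRACTU path above.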
2713
2714SDValue
2715HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
2716 const SDLoc &dl, MVT ValTy, MVT ResTy,
2717 SelectionDAG &DAG) const {
2718 // Special case for v{8,4,2}i1 (the only boolean vectors legal in Hexagon
2719 // without any coprocessors).
2720 MVT VecTy = ty(VecV);
2721 unsigned VecWidth = VecTy.getSizeInBits();
2722 unsigned ValWidth = ValTy.getSizeInBits();
2723 assert(VecWidth == VecTy.getVectorNumElements() &&
2724 "Vector elements should equal vector width size");
2725 assert(VecWidth == 8 || VecWidth == 4 || VecWidth == 2);
2726
2727 // Check if this is an extract of the lowest bit.
2728 if (isNullConstant(IdxV) && ValTy.getSizeInBits() == 1) {
2729 // Extracting the lowest bit is a no-op, but it changes the type,
2730 // so it must be kept as an operation to avoid errors related to
2731 // type mismatches.
2732 return DAG.getNode(HexagonISD::TYPECAST, dl, MVT::i1, VecV);
2733 }
2734
2735 // If the value extracted is a single bit, use tstbit.
2736 if (ValWidth == 1) {
2737 SDValue A0 = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2738 SDValue M0 = DAG.getConstant(8 / VecWidth, dl, MVT::i32);
2739 SDValue I0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, M0);
2740 return DAG.getNode(HexagonISD::TSTBIT, dl, MVT::i1, A0, I0);
2741 }
2742
2743 // Each bool vector (v2i1, v4i1, v8i1) always occupies 8 bits in
2744 // a predicate register. The elements of the vector are repeated
2745 // in the register (if necessary) so that the total number is 8.
2746 // The extracted subvector will need to be expanded in such a way.
2747 unsigned Scale = VecWidth / ValWidth;
2748
2749 // Generate (p2d VecV) >> 8*Idx to move the interesting bytes to
2750 // position 0.
2751 assert(ty(IdxV) == MVT::i32);
2752 unsigned VecRep = 8 / VecWidth;
2753 SDValue S0 = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV,
2754 DAG.getConstant(8*VecRep, dl, MVT::i32));
2755 SDValue T0 = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2756 SDValue T1 = DAG.getNode(ISD::SRL, dl, MVT::i64, T0, S0);
2757 while (Scale > 1) {
2758 // The longest possible subvector is at most 32 bits, so it is always
2759 // contained in the low subregister.
2760 T1 = LoHalf(T1, DAG);
2761 T1 = expandPredicate(T1, dl, DAG);
2762 Scale /= 2;
2763 }
2764
2765 return DAG.getNode(HexagonISD::D2P, dl, ResTy, T1);
2766}
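// E.g. extracting a v2i1 half of a v4i1: Scale = 2, so after the shift the
// selected bytes are expanded once with expandPredicate before the final D2P.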
2767
2768SDValue
2769HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
2770 const SDLoc &dl, MVT ValTy,
2771 SelectionDAG &DAG) const {
2772 MVT VecTy = ty(VecV);
2773 if (VecTy.getVectorElementType() == MVT::i1)
2774 return insertVectorPred(VecV, ValV, IdxV, dl, ValTy, DAG);
2775
2776 unsigned VecWidth = VecTy.getSizeInBits();
2777 unsigned ValWidth = ValTy.getSizeInBits();
2778 assert(VecWidth == 32 || VecWidth == 64);
2779 assert((VecWidth % ValWidth) == 0);
2780
2781 // Cast everything to scalar integer types.
2782 MVT ScalarTy = MVT::getIntegerVT(VecWidth);
2783 // The actual type of ValV may be different than ValTy (which is related
2784 // to the vector type).
2785 unsigned VW = ty(ValV).getSizeInBits();
2786 ValV = DAG.getBitcast(MVT::getIntegerVT(VW), ValV);
2787 VecV = DAG.getBitcast(ScalarTy, VecV);
2788 if (VW != VecWidth)
2789 ValV = DAG.getAnyExtOrTrunc(ValV, dl, ScalarTy);
2790
2791 SDValue WidthV = DAG.getConstant(ValWidth, dl, MVT::i32);
2792 SDValue InsV;
2793
2794 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(IdxV)) {
2795 unsigned W = C->getZExtValue() * ValWidth;
2796 SDValue OffV = DAG.getConstant(W, dl, MVT::i32);
2797 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2798 {VecV, ValV, WidthV, OffV});
2799 } else {
2800 if (ty(IdxV) != MVT::i32)
2801 IdxV = DAG.getZExtOrTrunc(IdxV, dl, MVT::i32);
2802 SDValue OffV = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, WidthV);
2803 InsV = DAG.getNode(HexagonISD::INSERT, dl, ScalarTy,
2804 {VecV, ValV, WidthV, OffV});
2805 }
2806
2807 return DAG.getNode(ISD::BITCAST, dl, VecTy, InsV);
2808}
2809
2810SDValue
2811HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV,
2812 SDValue IdxV, const SDLoc &dl,
2813 MVT ValTy, SelectionDAG &DAG) const {
2814 MVT VecTy = ty(VecV);
2815 unsigned VecLen = VecTy.getVectorNumElements();
2816
2817 if (ValTy == MVT::i1) {
2818 SDValue ToReg = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {VecV}, DAG);
2819 SDValue Ext = DAG.getSExtOrTrunc(ValV, dl, MVT::i32);
2820 SDValue Width = DAG.getConstant(8 / VecLen, dl, MVT::i32);
2821 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, Width);
2822 SDValue Ins =
2823 DAG.getNode(HexagonISD::INSERT, dl, MVT::i32, {ToReg, Ext, Width, Idx});
2824 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Ins}, DAG);
2825 }
2826
2827 assert(ValTy.getVectorElementType() == MVT::i1);
2828 SDValue ValR = ValTy.isVector()
2829 ? DAG.getNode(HexagonISD::P2D, dl, MVT::i64, ValV)
2830 : DAG.getSExtOrTrunc(ValV, dl, MVT::i64);
2831
2832 unsigned Scale = VecLen / ValTy.getVectorNumElements();
2833 assert(Scale > 1);
2834
2835 for (unsigned R = Scale; R > 1; R /= 2) {
2836 ValR = contractPredicate(ValR, dl, DAG);
2837 ValR = getCombine(DAG.getUNDEF(MVT::i32), ValR, dl, MVT::i64, DAG);
2838 }
2839
2840 SDValue Width = DAG.getConstant(64 / Scale, dl, MVT::i32);
2841 SDValue Idx = DAG.getNode(ISD::MUL, dl, MVT::i32, IdxV, Width);
2842 SDValue VecR = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, VecV);
2843 SDValue Ins =
2844 DAG.getNode(HexagonISD::INSERT, dl, MVT::i64, {VecR, ValR, Width, Idx});
2845 return DAG.getNode(HexagonISD::D2P, dl, VecTy, Ins);
2846}
2847
2848SDValue
2849HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
2850 SelectionDAG &DAG) const {
2851 assert(ty(Vec32).getSizeInBits() == 32);
2852 if (isUndef(Vec32))
2853 return DAG.getUNDEF(MVT::i64);
2854 SDValue P = DAG.getBitcast(MVT::v4i8, Vec32);
2855 SDValue X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i16, P);
2856 return DAG.getBitcast(MVT::i64, X);
2857}
2858
2859SDValue
2860HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
2861 SelectionDAG &DAG) const {
2862 assert(ty(Vec64).getSizeInBits() == 64);
2863 if (isUndef(Vec64))
2864 return DAG.getUNDEF(MVT::i32);
2865 // Collect even bytes:
2866 SDValue A = DAG.getBitcast(MVT::v8i8, Vec64);
2867 SDValue S = DAG.getVectorShuffle(MVT::v8i8, dl, A, DAG.getUNDEF(MVT::v8i8),
2868 {0, 2, 4, 6, 1, 3, 5, 7});
2869 return extractVector(S, DAG.getConstant(0, dl, MVT::i32), dl, MVT::v4i8,
2870 MVT::i32, DAG);
2871}
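// The shuffle above moves the even bytes of Vec64 (positions 0, 2, 4, 6)
// into the low 32 bits, which the extractVector call then returns as i32.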
2872
2873SDValue
2874HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
2875 const {
2876 if (Ty.isVector()) {
2877 unsigned W = Ty.getSizeInBits();
2878 if (W <= 64)
2879 return DAG.getBitcast(Ty, DAG.getConstant(0, dl, MVT::getIntegerVT(W)));
2880 return DAG.getNode(ISD::SPLAT_VECTOR, dl, Ty, getZero(dl, MVT::i32, DAG));
2881 }
2882
2883 if (Ty.isInteger())
2884 return DAG.getConstant(0, dl, Ty);
2885 if (Ty.isFloatingPoint())
2886 return DAG.getConstantFP(0.0, dl, Ty);
2887 llvm_unreachable("Invalid type for zero");
2888}
2889
2890SDValue
2891HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG)
2892 const {
2893 MVT ValTy = ty(Val);
2894 assert(ValTy.getVectorElementType() == ResTy.getVectorElementType());
2895
2896 unsigned ValLen = ValTy.getVectorNumElements();
2897 unsigned ResLen = ResTy.getVectorNumElements();
2898 if (ValLen == ResLen)
2899 return Val;
2900
2901 const SDLoc &dl(Val);
2902 assert(ValLen < ResLen);
2903 assert(ResLen % ValLen == 0);
2904
2905 SmallVector<SDValue, 4> Concats = {Val};
2906 for (unsigned i = 1, e = ResLen / ValLen; i < e; ++i)
2907 Concats.push_back(DAG.getUNDEF(ValTy));
2908
2909 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResTy, Concats);
2910}
2911
2912SDValue
2913HexagonTargetLowering::getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl,
2914 MVT ResTy, SelectionDAG &DAG) const {
2915 MVT ElemTy = ty(Hi);
2916 assert(ElemTy == ty(Lo));
2917
2918 if (!ElemTy.isVector()) {
2919 assert(ElemTy.isScalarInteger());
2920 MVT PairTy = MVT::getIntegerVT(2 * ElemTy.getSizeInBits());
2921 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, PairTy, Lo, Hi);
2922 return DAG.getBitcast(ResTy, Pair);
2923 }
2924
2925 unsigned Width = ElemTy.getSizeInBits();
2926 MVT IntTy = MVT::getIntegerVT(Width);
2927 MVT PairTy = MVT::getIntegerVT(2 * Width);
2928 SDValue Pair =
2929 DAG.getNode(ISD::BUILD_PAIR, dl, PairTy,
2930 {DAG.getBitcast(IntTy, Lo), DAG.getBitcast(IntTy, Hi)});
2931 return DAG.getBitcast(ResTy, Pair);
2932}
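// E.g. getCombine(Hi, Lo) with two i32 scalars yields an i64 via
// BUILD_PAIR (Lo, Hi); vector inputs are first bitcast to same-width integers.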
2933
2934 SDValue
2935 HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
2936 MVT VecTy = ty(Op);
2937 unsigned BW = VecTy.getSizeInBits();
2938 const SDLoc &dl(Op);
2939 SmallVector<SDValue,8> Ops;
2940 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i)
2941 Ops.push_back(Op.getOperand(i));
2942
2943 if (BW == 32)
2944 return buildVector32(Ops, dl, VecTy, DAG);
2945 if (BW == 64)
2946 return buildVector64(Ops, dl, VecTy, DAG);
2947
2948 if (VecTy == MVT::v8i1 || VecTy == MVT::v4i1 || VecTy == MVT::v2i1) {
2949 // Check if this is a special case or all-0 or all-1.
2950 bool All0 = true, All1 = true;
2951 for (SDValue P : Ops) {
2952 auto *CN = dyn_cast<ConstantSDNode>(P.getNode());
2953 if (CN == nullptr) {
2954 All0 = All1 = false;
2955 break;
2956 }
2957 uint32_t C = CN->getZExtValue();
2958 All0 &= (C == 0);
2959 All1 &= (C == 1);
2960 }
2961 if (All0)
2962 return DAG.getNode(HexagonISD::PFALSE, dl, VecTy);
2963 if (All1)
2964 return DAG.getNode(HexagonISD::PTRUE, dl, VecTy);
2965
2966 // For each i1 element in the resulting predicate register, put 1
2967 // shifted by the index of the element into a general-purpose register,
2968 // then or them together and transfer it back into a predicate register.
2969 SDValue Rs[8];
2970 SDValue Z = getZero(dl, MVT::i32, DAG);
2971 // Always produce 8 bits, repeat inputs if necessary.
2972 unsigned Rep = 8 / VecTy.getVectorNumElements();
2973 for (unsigned i = 0; i != 8; ++i) {
2974 SDValue S = DAG.getConstant(1ull << i, dl, MVT::i32);
2975 Rs[i] = DAG.getSelect(dl, MVT::i32, Ops[i/Rep], S, Z);
2976 }
2977 for (ArrayRef<SDValue> A(Rs); A.size() != 1; A = A.drop_back(A.size()/2)) {
2978 for (unsigned i = 0, e = A.size()/2; i != e; ++i)
2979 Rs[i] = DAG.getNode(ISD::OR, dl, MVT::i32, Rs[2*i], Rs[2*i+1]);
2980 }
2981 // Move the value directly to a predicate register.
2982 return getInstr(Hexagon::C2_tfrrp, dl, VecTy, {Rs[0]}, DAG);
2983 }
2984
2985 return SDValue();
2986}
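// For the boolean case above with VecTy = v4i1, Rep = 2: each of the four
// operands drives two adjacent bits of the 8-bit predicate, the OR tree
// reduces the eight words to one, and C2_tfrrp moves it into a predicate.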
2987
2988 SDValue
2989 HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
2990 SelectionDAG &DAG) const {
2991 MVT VecTy = ty(Op);
2992 const SDLoc &dl(Op);
2993 if (VecTy.getSizeInBits() == 64) {
2994 assert(Op.getNumOperands() == 2);
2995 return getCombine(Op.getOperand(1), Op.getOperand(0), dl, VecTy, DAG);
2996 }
2997
2998 MVT ElemTy = VecTy.getVectorElementType();
2999 if (ElemTy == MVT::i1) {
3000 assert(VecTy == MVT::v2i1 || VecTy == MVT::v4i1 || VecTy == MVT::v8i1);
3001 MVT OpTy = ty(Op.getOperand(0));
3002 // Scale is how many times the operands need to be contracted to match
3003 // the representation in the target register.
3004 unsigned Scale = VecTy.getVectorNumElements() / OpTy.getVectorNumElements();
3005 assert(Scale == Op.getNumOperands() && Scale > 1);
3006
3007 // First, convert all bool vectors to integers, then generate pairwise
3008 // inserts to form values of doubled length. Up until there are only
3009 // two values left to concatenate, all of these values will fit in a
3010 // 32-bit integer, so keep them as i32 to use 32-bit inserts.
3011 SmallVector<SDValue,4> Words[2];
3012 unsigned IdxW = 0;
3013
3014 for (SDValue P : Op.getNode()->op_values()) {
3015 SDValue W = DAG.getNode(HexagonISD::P2D, dl, MVT::i64, P);
3016 for (unsigned R = Scale; R > 1; R /= 2) {
3017 W = contractPredicate(W, dl, DAG);
3018 W = getCombine(DAG.getUNDEF(MVT::i32), W, dl, MVT::i64, DAG);
3019 }
3020 W = LoHalf(W, DAG);
3021 Words[IdxW].push_back(W);
3022 }
3023
3024 while (Scale > 2) {
3025 SDValue WidthV = DAG.getConstant(64 / Scale, dl, MVT::i32);
3026 Words[IdxW ^ 1].clear();
3027
3028 for (unsigned i = 0, e = Words[IdxW].size(); i != e; i += 2) {
3029 SDValue W0 = Words[IdxW][i], W1 = Words[IdxW][i+1];
3030 // Insert W1 into W0 right next to the significant bits of W0.
3031 SDValue T = DAG.getNode(HexagonISD::INSERT, dl, MVT::i32,
3032 {W0, W1, WidthV, WidthV});
3033 Words[IdxW ^ 1].push_back(T);
3034 }
3035 IdxW ^= 1;
3036 Scale /= 2;
3037 }
3038
3039 // At this point there should only be two words left, and Scale should be 2.
3040 assert(Scale == 2 && Words[IdxW].size() == 2);
3041
3042 SDValue WW = getCombine(Words[IdxW][1], Words[IdxW][0], dl, MVT::i64, DAG);
3043 return DAG.getNode(HexagonISD::D2P, dl, VecTy, WW);
3044 }
3045
3046 return SDValue();
3047}
3048
3049 SDValue
3050 HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
3051 SelectionDAG &DAG) const {
3052 SDValue Vec = Op.getOperand(0);
3053 MVT ElemTy = ty(Vec).getVectorElementType();
3054 return extractVector(Vec, Op.getOperand(1), SDLoc(Op), ElemTy, ty(Op), DAG);
3055}
3056
3057 SDValue
3058 HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
3059 SelectionDAG &DAG) const {
3060 return extractVector(Op.getOperand(0), Op.getOperand(1), SDLoc(Op),
3061 ty(Op), ty(Op), DAG);
3062}
3063
3064 SDValue
3065 HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
3066 SelectionDAG &DAG) const {
3067 return insertVector(Op.getOperand(0), Op.getOperand(1), Op.getOperand(2),
3068 SDLoc(Op), ty(Op).getVectorElementType(), DAG);
3069}
3070
3071 SDValue
3072 HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
3073 SelectionDAG &DAG) const {
3074 SDValue ValV = Op.getOperand(1);
3075 return insertVector(Op.getOperand(0), ValV, Op.getOperand(2),
3076 SDLoc(Op), ty(ValV), DAG);
3077}
3078
3079 bool
3080 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
3081 // Assuming the caller does not have either a signext or zeroext modifier, and
3082 // only one value is accepted, any reasonable truncation is allowed.
3083 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
3084 return false;
3085
3086 // FIXME: in principle up to 64-bit could be made safe, but it would be very
3087 // fragile at the moment: any support for multiple value returns would be
3088 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
3089 return Ty1->getPrimitiveSizeInBits() <= 32;
3090}
3091
3092 SDValue
3093 HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {
3094 MVT Ty = ty(Op);
3095 const SDLoc &dl(Op);
3096 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
3097 MVT MemTy = LN->getMemoryVT().getSimpleVT();
3098 ISD::LoadExtType ET = LN->getExtensionType();
3099
3100 bool LoadPred = MemTy == MVT::v2i1 || MemTy == MVT::v4i1 || MemTy == MVT::v8i1;
3101 if (LoadPred) {
3102 SDValue NL = DAG.getLoad(
3103 LN->getAddressingMode(), ISD::ZEXTLOAD, MVT::i32, dl, LN->getChain(),
3104 LN->getBasePtr(), LN->getOffset(), LN->getPointerInfo(),
3105 /*MemoryVT*/ MVT::i8, LN->getAlign(), LN->getMemOperand()->getFlags(),
3106 LN->getAAInfo(), LN->getRanges());
3107 LN = cast<LoadSDNode>(NL.getNode());
3108 }
3109
3110 Align ClaimAlign = LN->getAlign();
3111 if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG))
3112 return replaceMemWithUndef(Op, DAG);
3113
3114 // Call LowerUnalignedLoad for all loads; it recognizes loads that
3115 // don't need extra aligning.
3116 SDValue LU = LowerUnalignedLoad(SDValue(LN, 0), DAG);
3117 if (LoadPred) {
3118 SDValue TP = getInstr(Hexagon::C2_tfrrp, dl, MemTy, {LU}, DAG);
3119 if (ET == ISD::SEXTLOAD) {
3120 TP = DAG.getSExtOrTrunc(TP, dl, Ty);
3121 } else if (ET != ISD::NON_EXTLOAD) {
3122 TP = DAG.getZExtOrTrunc(TP, dl, Ty);
3123 }
3124 SDValue Ch = cast<LoadSDNode>(LU.getNode())->getChain();
3125 return DAG.getMergeValues({TP, Ch}, dl);
3126 }
3127 return LU;
3128}
3129
3130 SDValue
3131 HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {
3132 const SDLoc &dl(Op);
3133 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
3134 SDValue Val = SN->getValue();
3135 MVT Ty = ty(Val);
3136
3137 if (Ty == MVT::v2i1 || Ty == MVT::v4i1 || Ty == MVT::v8i1) {
3138 // Store the exact predicate (all bits).
3139 SDValue TR = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32, {Val}, DAG);
3140 SDValue NS = DAG.getTruncStore(SN->getChain(), dl, TR, SN->getBasePtr(),
3141 MVT::i8, SN->getMemOperand());
3142 if (SN->isIndexed()) {
3143 NS = DAG.getIndexedStore(NS, dl, SN->getBasePtr(), SN->getOffset(),
3144 SN->getAddressingMode());
3145 }
3146 SN = cast<StoreSDNode>(NS.getNode());
3147 }
3148
3149 Align ClaimAlign = SN->getAlign();
3150 if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG))
3151 return replaceMemWithUndef(Op, DAG);
3152
3153 MVT StoreTy = SN->getMemoryVT().getSimpleVT();
3154 Align NeedAlign = Subtarget.getTypeAlignment(StoreTy);
3155 if (ClaimAlign < NeedAlign)
3156 return expandUnalignedStore(SN, DAG);
3157 return SDValue(SN, 0);
3158}
3159
3160 SDValue
3161 HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
3162 const {
3163 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
3164 MVT LoadTy = ty(Op);
3165 unsigned NeedAlign = Subtarget.getTypeAlignment(LoadTy).value();
3166 unsigned HaveAlign = LN->getAlign().value();
3167 if (HaveAlign >= NeedAlign)
3168 return Op;
3169
3170 const SDLoc &dl(Op);
3171 const DataLayout &DL = DAG.getDataLayout();
3172 LLVMContext &Ctx = *DAG.getContext();
3173
3174 // If the load aligning is disabled or the load can be broken up into two
3175 // smaller legal loads, do the default (target-independent) expansion.
3176 bool DoDefault = false;
3177 // Handle it in the default way if this is an indexed load.
3178 if (!LN->isUnindexed())
3179 DoDefault = true;
3180
3181 if (!AlignLoads) {
3182 if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
3183 *LN->getMemOperand()))
3184 return Op;
3185 DoDefault = true;
3186 }
3187 if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
3188 // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
3189 MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
3190 : MVT::getVectorVT(MVT::i8, HaveAlign);
3191 DoDefault =
3192 allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
3193 }
3194 if (DoDefault) {
3195 std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
3196 return DAG.getMergeValues({P.first, P.second}, dl);
3197 }
3198
3199 // The code below generates two loads, both aligned as NeedAlign, and
3200 // with the distance of NeedAlign between them. For that to cover the
3201 // bits that need to be loaded (and without overlapping), the size of
3202 // the loads should be equal to NeedAlign. This is true for all loadable
3203 // types, but add an assertion in case something changes in the future.
3204 assert(LoadTy.getSizeInBits() == 8*NeedAlign);
3205
3206 unsigned LoadLen = NeedAlign;
3207 SDValue Base = LN->getBasePtr();
3208 SDValue Chain = LN->getChain();
3209 auto BO = getBaseAndOffset(Base);
3210 unsigned BaseOpc = BO.first.getOpcode();
3211 if (BaseOpc == HexagonISD::VALIGNADDR && BO.second % LoadLen == 0)
3212 return Op;
3213
3214 if (BO.second % LoadLen != 0) {
3215 BO.first = DAG.getNode(ISD::ADD, dl, MVT::i32, BO.first,
3216 DAG.getConstant(BO.second % LoadLen, dl, MVT::i32));
3217 BO.second -= BO.second % LoadLen;
3218 }
3219 SDValue BaseNoOff = (BaseOpc != HexagonISD::VALIGNADDR)
3220 ? DAG.getNode(HexagonISD::VALIGNADDR, dl, MVT::i32, BO.first,
3221 DAG.getConstant(NeedAlign, dl, MVT::i32))
3222 : BO.first;
3223 SDValue Base0 =
3224 DAG.getMemBasePlusOffset(BaseNoOff, TypeSize::getFixed(BO.second), dl);
3225 SDValue Base1 = DAG.getMemBasePlusOffset(
3226 BaseNoOff, TypeSize::getFixed(BO.second + LoadLen), dl);
3227
3228 MachineMemOperand *WideMMO = nullptr;
3229 if (MachineMemOperand *MMO = LN->getMemOperand()) {
3230 MachineFunction &MF = DAG.getMachineFunction();
3231 WideMMO = MF.getMachineMemOperand(
3232 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
3233 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
3234 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
3235 }
3236
3237 SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
3238 SDValue Load1 = DAG.getLoad(LoadTy, dl, Chain, Base1, WideMMO);
3239
3240 SDValue Aligned = DAG.getNode(HexagonISD::VALIGN, dl, LoadTy,
3241 {Load1, Load0, BaseNoOff.getOperand(0)});
3242 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3243 Load0.getValue(1), Load1.getValue(1));
3244 SDValue M = DAG.getMergeValues({Aligned, NewChain}, dl);
3245 return M;
3246}
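// When the two-smaller-loads expansion is not allowed (DoDefault is false),
// the code above issues two NeedAlign-sized loads at the surrounding aligned
// addresses and splices the requested bytes back together with VALIGN.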
3247
3248 SDValue
3249 HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {
3250 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
3251 auto *CY = dyn_cast<ConstantSDNode>(Y);
3252 if (!CY)
3253 return SDValue();
3254
3255 const SDLoc &dl(Op);
3256 SDVTList VTs = Op.getNode()->getVTList();
3257 assert(VTs.NumVTs == 2);
3258 assert(VTs.VTs[1] == MVT::i1);
3259 unsigned Opc = Op.getOpcode();
3260
3261 if (CY) {
3262 uint64_t VY = CY->getZExtValue();
3263 assert(VY != 0 && "This should have been folded");
3264 // X +/- 1
3265 if (VY != 1)
3266 return SDValue();
3267
3268 if (Opc == ISD::UADDO) {
3269 SDValue Op = DAG.getNode(ISD::ADD, dl, VTs.VTs[0], {X, Y});
3270 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op, getZero(dl, ty(Op), DAG),
3271 ISD::SETEQ);
3272 return DAG.getMergeValues({Op, Ov}, dl);
3273 }
3274 if (Opc == ISD::USUBO) {
3275 SDValue Op = DAG.getNode(ISD::SUB, dl, VTs.VTs[0], {X, Y});
3276 SDValue Ov = DAG.getSetCC(dl, MVT::i1, Op,
3277 DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ);
3278 return DAG.getMergeValues({Op, Ov}, dl);
3279 }
3280 }
3281
3282 return SDValue();
3283}
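// Rationale: uaddo(x, 1) overflows exactly when the sum wraps to 0, and
// usubo(x, 1) overflows exactly when x was 0 and the result wraps to -1,
// which is what the SETEQ comparisons above test.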
3284
3285 SDValue HexagonTargetLowering::LowerUAddSubOCarry(SDValue Op,
3286 SelectionDAG &DAG) const {
3287 const SDLoc &dl(Op);
3288 unsigned Opc = Op.getOpcode();
3289 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), C = Op.getOperand(2);
3290
3291 if (Opc == ISD::UADDO_CARRY)
3292 return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(),
3293 { X, Y, C });
3294
3295 EVT CarryTy = C.getValueType();
3296 SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(),
3297 { X, Y, DAG.getLogicalNOT(dl, C, CarryTy) });
3298 SDValue Out[] = { SubC.getValue(0),
3299 DAG.getLogicalNOT(dl, SubC.getValue(1), CarryTy) };
3300 return DAG.getMergeValues(Out, dl);
3301}
3302
3303 SDValue
3304 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
3305 SDValue Chain = Op.getOperand(0);
3306 SDValue Offset = Op.getOperand(1);
3307 SDValue Handler = Op.getOperand(2);
3308 SDLoc dl(Op);
3309 auto PtrVT = getPointerTy(DAG.getDataLayout());
3310
3311 // Mark function as containing a call to EH_RETURN.
3312 HexagonMachineFunctionInfo *FuncInfo =
3313 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
3314 FuncInfo->setHasEHReturn();
3315
3316 unsigned OffsetReg = Hexagon::R28;
3317
3318 SDValue StoreAddr =
3319 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
3320 DAG.getIntPtrConstant(4, dl));
3321 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
3322 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
3323
3324 // Not needed; we already use it as an explicit input to EH_RETURN.
3325 // MF.getRegInfo().addLiveOut(OffsetReg);
3326
3327 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
3328}
3329
3330 SDValue
3331 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3332 unsigned Opc = Op.getOpcode();
3333
3334 // Handle INLINEASM first.
3335 if (Opc == ISD::INLINEASM || Opc == ISD::INLINEASM_BR)
3336 return LowerINLINEASM(Op, DAG);
3337
3338 if (isHvxOperation(Op.getNode(), DAG)) {
3339 // If HVX lowering returns nothing, try the default lowering.
3340 if (SDValue V = LowerHvxOperation(Op, DAG))
3341 return V;
3342 }
3343
3344 switch (Opc) {
3345 default:
3346#ifndef NDEBUG
3347 Op.getNode()->dumpr(&DAG);
3348 if (Opc > HexagonISD::OP_BEGIN && Opc < HexagonISD::OP_END)
3349 errs() << "Error: check for a non-legal type in this operation\n";
3350#endif
3351 llvm_unreachable("Should not custom lower this!");
3352
3353 case ISD::FDIV:
3354 return LowerFDIV(Op, DAG);
3355 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
3356 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG);
3357 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
3358 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
3359 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3360 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
3361 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
3362 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
3363 case ISD::LOAD: return LowerLoad(Op, DAG);
3364 case ISD::STORE: return LowerStore(Op, DAG);
3365 case ISD::UADDO:
3366 case ISD::USUBO: return LowerUAddSubO(Op, DAG);
3367 case ISD::UADDO_CARRY:
3368 case ISD::USUBO_CARRY: return LowerUAddSubOCarry(Op, DAG);
3369 case ISD::SRA:
3370 case ISD::SHL:
3371 case ISD::SRL: return LowerVECTOR_SHIFT(Op, DAG);
3372 case ISD::ROTL: return LowerROTL(Op, DAG);
3373 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
3374 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
3375 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
3376 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
3377 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
3379 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
3380 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
3381 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
3383 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
3384 case ISD::VASTART: return LowerVASTART(Op, DAG);
3385 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
3386 case ISD::SETCC: return LowerSETCC(Op, DAG);
3387 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
3388 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3389 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3390 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG);
3392 break;
3393 }
3394
3395 return SDValue();
3396}
3397
3398 void
3399 HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
3400 SmallVectorImpl<SDValue> &Results,
3401 SelectionDAG &DAG) const {
3402 if (isHvxOperation(N, DAG)) {
3403 LowerHvxOperationWrapper(N, Results, DAG);
3404 if (!Results.empty())
3405 return;
3406 }
3407
3408 SDValue Op(N, 0);
3409 unsigned Opc = N->getOpcode();
3410
3411 switch (Opc) {
3412 case HexagonISD::SSAT:
3413 case HexagonISD::USAT:
3414 Results.push_back(opJoin(SplitVectorOp(Op, DAG), SDLoc(Op), DAG));
3415 break;
3416 case ISD::STORE:
3417 // We are only custom-lowering stores to verify the alignment of the
3418 // address if it is a compile-time constant. Since a store can be
3419 // modified during type-legalization (the value being stored may need
3420 // legalization), return empty Results here to indicate that we don't
3421 // really make any changes in the custom lowering.
3422 return;
3423 default:
3424 TargetLowering::LowerOperationWrapper(N, Results, DAG);
3425 break;
3426 }
3427}
3428
3429 void
3430 HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
3431 SmallVectorImpl<SDValue> &Results,
3432 SelectionDAG &DAG) const {
3433 if (isHvxOperation(N, DAG)) {
3434 ReplaceHvxNodeResults(N, Results, DAG);
3435 if (!Results.empty())
3436 return;
3437 }
3438
3439 const SDLoc &dl(N);
3440 switch (N->getOpcode()) {
3441 case ISD::SRL:
3442 case ISD::SRA:
3443 case ISD::SHL:
3444 return;
3445 case ISD::BITCAST:
3446 // Handle a bitcast from v8i1 to i8.
3447 if (N->getValueType(0) == MVT::i8) {
3448 if (N->getOperand(0).getValueType() == MVT::v8i1) {
3449 SDValue P = getInstr(Hexagon::C2_tfrpr, dl, MVT::i32,
3450 N->getOperand(0), DAG);
3451 SDValue T = DAG.getAnyExtOrTrunc(P, dl, MVT::i8);
3452 Results.push_back(T);
3453 }
3454 }
3455 break;
3456 }
3457}
3458
3459 SDValue
3460 HexagonTargetLowering::PerformDAGCombine(SDNode *N,
3461 DAGCombinerInfo &DCI) const {
3462 if (isHvxOperation(N, DCI.DAG)) {
3463 if (SDValue V = PerformHvxDAGCombine(N, DCI))
3464 return V;
3465 return SDValue();
3466 }
3467
3468 SDValue Op(N, 0);
3469 const SDLoc &dl(Op);
3470 unsigned Opc = Op.getOpcode();
3471
3472 if (Opc == ISD::TRUNCATE) {
3473 SDValue Op0 = Op.getOperand(0);
3474 // fold (truncate (build pair x, y)) -> (truncate x) or x
3475 if (Op0.getOpcode() == ISD::BUILD_PAIR) {
3476 EVT TruncTy = Op.getValueType();
3477 SDValue Elem0 = Op0.getOperand(0);
3478 // if we match the low element of the pair, just return it.
3479 if (Elem0.getValueType() == TruncTy)
3480 return Elem0;
3481 // otherwise, if the low part is still too large, apply the truncate.
3482 if (Elem0.getValueType().bitsGT(TruncTy))
3483 return DCI.DAG.getNode(ISD::TRUNCATE, dl, TruncTy, Elem0);
3484 }
3485 }
3486
3487 if (DCI.isBeforeLegalizeOps())
3488 return SDValue();
3489
3490 if (Opc == HexagonISD::P2D) {
3491 SDValue P = Op.getOperand(0);
3492 switch (P.getOpcode()) {
3493 case HexagonISD::PTRUE:
3494 return DCI.DAG.getConstant(-1, dl, ty(Op));
3495 case HexagonISD::PFALSE:
3496 return getZero(dl, ty(Op), DCI.DAG);
3497 default:
3498 break;
3499 }
3500 } else if (Opc == ISD::VSELECT) {
3501 // This is pretty much duplicated in HexagonISelLoweringHVX...
3502 //
3503 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0)
3504 SDValue Cond = Op.getOperand(0);
3505 if (Cond->getOpcode() == ISD::XOR) {
3506 SDValue C0 = Cond.getOperand(0), C1 = Cond.getOperand(1);
3507 if (C1->getOpcode() == HexagonISD::PTRUE) {
3508 SDValue VSel = DCI.DAG.getNode(ISD::VSELECT, dl, ty(Op), C0,
3509 Op.getOperand(2), Op.getOperand(1));
3510 return VSel;
3511 }
3512 }
3513 } else if (Opc == ISD::TRUNCATE) {
3514 SDValue Op0 = Op.getOperand(0);
3515 // fold (truncate (build pair x, y)) -> (truncate x) or x
3516 if (Op0.getOpcode() == ISD::BUILD_PAIR) {
3517 MVT TruncTy = ty(Op);
3518 SDValue Elem0 = Op0.getOperand(0);
3519 // if we match the low element of the pair, just return it.
3520 if (ty(Elem0) == TruncTy)
3521 return Elem0;
3522 // otherwise, if the low part is still too large, apply the truncate.
3523 if (ty(Elem0).bitsGT(TruncTy))
3524 return DCI.DAG.getNode(ISD::TRUNCATE, dl, TruncTy, Elem0);
3525 }
3526 } else if (Opc == ISD::OR) {
3527 // fold (or (shl xx, s), (zext y)) -> (COMBINE (shl xx, s-32), y)
3528 // if s >= 32
3529 auto fold0 = [&, this](SDValue Op) {
3530 if (ty(Op) != MVT::i64)
3531 return SDValue();
3532 SDValue Shl = Op.getOperand(0);
3533 SDValue Zxt = Op.getOperand(1);
3534 if (Shl.getOpcode() != ISD::SHL)
3535 std::swap(Shl, Zxt);
3536
3537 if (Shl.getOpcode() != ISD::SHL || Zxt.getOpcode() != ISD::ZERO_EXTEND)
3538 return SDValue();
3539
3540 SDValue Z = Zxt.getOperand(0);
3541 auto *Amt = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
3542 if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) {
3543 unsigned A = Amt->getZExtValue();
3544 SDValue S = Shl.getOperand(0);
3545 SDValue T0 = DCI.DAG.getNode(ISD::SHL, dl, ty(S), S,
3546 DCI.DAG.getConstant(A - 32, dl, MVT::i32));
3547 SDValue T1 = DCI.DAG.getZExtOrTrunc(T0, dl, MVT::i32);
3548 SDValue T2 = DCI.DAG.getZExtOrTrunc(Z, dl, MVT::i32);
3549 return DCI.DAG.getNode(HexagonISD::COMBINE, dl, MVT::i64, {T1, T2});
3550 }
3551 return SDValue();
3552 };
3553
3554 if (SDValue R = fold0(Op))
3555 return R;
3556 }
3557
3558 return SDValue();
3559}
3560
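// Editor's note (a worked example, not in the upstream source): with
// x : i64, y : i32 and a shift amount of 40, the fold0 rule above gives
//
//   (or (shl x, 40), (zext y)) -> (COMBINE (trunc (shl x, 8)), y)
//
// The low 32 bits of (shl x, 40) are known zero, so the low word of the
// result is y; the high word is the low 32 bits of x << (40 - 32). The
// first COMBINE operand becomes the high word of the i64 result.
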
3561/// Returns relocation base for the given PIC jumptable.
3562 SDValue
3563 HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3564 SelectionDAG &DAG) const {
3565 int Idx = cast<JumpTableSDNode>(Table)->getIndex();
3566 EVT VT = Table.getValueType();
3567 SDValue T = DAG.getTargetJumpTable(Idx, VT, HexagonII::MO_PCREL);
3568 return DAG.getNode(HexagonISD::AT_PCREL, SDLoc(Table), VT, T);
3569}
3570
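// Editor's note (not in the upstream source): in PIC code the jump table
// cannot be addressed through an absolute constant, so the relocation base
// built above is the table's own PC-relative address (AT_PCREL of the
// target jump-table node), and the table entries are offsets from that
// base. At the assembly level this comes out roughly as
// "r0 = add(pc, ##.LJTI0_0@PCREL)" (illustrative register and label names).
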
3571//===----------------------------------------------------------------------===//
3572// Inline Assembly Support
3573//===----------------------------------------------------------------------===//
3574
3575 TargetLowering::ConstraintType
3576 HexagonTargetLowering::getConstraintType(StringRef Constraint) const {
3577 if (Constraint.size() == 1) {
3578 switch (Constraint[0]) {
3579 case 'q':
3580 case 'v':
3581 if (Subtarget.useHVXOps())
3582 return C_RegisterClass;
3583 break;
3584 case 'a':
3585 return C_RegisterClass;
3586 default:
3587 break;
3588 }
3589 }
3590 return TargetLowering::getConstraintType(Constraint);
3591}
3592
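// Editor's note (hypothetical user code, not in the upstream source): with
// HVX enabled, 'v' ties an operand to an HVX vector register, e.g.
//
//   typedef int V16 __attribute__((vector_size(64)));
//   V16 out, in;
//   asm("%0 = %1" : "=v"(out) : "v"(in));
//
// 'q' and 'v' are register-class constraints only when HVX is available;
// otherwise they defer to TargetLowering::getConstraintType, as does
// everything except 'a'.
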
3593 std::pair<unsigned, const TargetRegisterClass*>
3594 HexagonTargetLowering::getRegForInlineAsmConstraint(
3595 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
3596
3597 if (Constraint.size() == 1) {
3598 switch (Constraint[0]) {
3599 case 'r': // R0-R31
3600 switch (VT.SimpleTy) {
3601 default:
3602 return {0u, nullptr};
3603 case MVT::i1:
3604 case MVT::i8:
3605 case MVT::i16:
3606 case MVT::i32:
3607 case MVT::f32:
3608 return {0u, &Hexagon::IntRegsRegClass};
3609 case MVT::i64:
3610 case MVT::f64:
3611 return {0u, &Hexagon::DoubleRegsRegClass};
3612 }
3613 break;
3614 case 'a': // M0-M1
3615 if (VT != MVT::i32)
3616 return {0u, nullptr};
3617 return {0u, &Hexagon::ModRegsRegClass};
3618 case 'q': // q0-q3
3619 switch (VT.getSizeInBits()) {
3620 default:
3621 return {0u, nullptr};
3622 case 64:
3623 case 128:
3624 return {0u, &Hexagon::HvxQRRegClass};
3625 }
3626 break;
3627 case 'v': // V0-V31
3628 switch (VT.getSizeInBits()) {
3629 default:
3630 return {0u, nullptr};
3631 case 512:
3632 return {0u, &Hexagon::HvxVRRegClass};
3633 case 1024:
3634 if (Subtarget.hasV60Ops() && Subtarget.useHVX128BOps())
3635 return {0u, &Hexagon::HvxVRRegClass};
3636 return {0u, &Hexagon::HvxWRRegClass};
3637 case 2048:
3638 return {0u, &Hexagon::HvxWRRegClass};
3639 }
3640 break;
3641 default:
3642 return {0u, nullptr};
3643 }
3644 }
3645
3646 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3647}
3648
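// Editor's note (a summary of the mapping above, not in the upstream
// source): "r" yields IntRegs for scalars up to 32 bits and DoubleRegs for
// i64/f64; "a" yields ModRegs only for i32; "q" yields HvxQR for 64- and
// 128-bit predicate vectors; "v" selects HvxVR or HvxWR by bit width,
// where a 1024-bit value is a single register in 128-byte HVX mode but a
// register pair otherwise.
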
3649/// isFPImmLegal - Returns true if the target can instruction select the
3650/// specified FP immediate natively. If false, the legalizer will
3651/// materialize the FP immediate as a load from a constant pool.
3652 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3653 bool ForCodeSize) const {
3654 return true;
3655}
3656
3657/// isLegalAddressingMode - Return true if the addressing mode represented by
3658/// AM is legal for this target, for a load/store of the specified type.
3659 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
3660 const AddrMode &AM, Type *Ty,
3661 unsigned AS, Instruction *I) const {
3662 if (Ty->isSized()) {
3663 // When LSR detects uses of the same base address to access different
3664 // types (e.g. unions), it will assume a conservative type for these
3665 // uses:
3666 // LSR Use: Kind=Address of void in addrspace(4294967295), ...
3667 // The type Ty passed here would then be "void". Skip the alignment
3668 // checks, but do not return false right away, since that confuses
3669 // LSR into crashing.
3670 Align A = DL.getABITypeAlign(Ty);
3671 // The base offset must be a multiple of the alignment.
3672 if (!isAligned(A, AM.BaseOffs))
3673 return false;
3674 // The shifted offset must fit in 11 bits.
3675 if (!isInt<11>(AM.BaseOffs >> Log2(A)))
3676 return false;
3677 }
3678
3679 // No global is ever allowed as a base.
3680 if (AM.BaseGV)
3681 return false;
3682
3683 int Scale = AM.Scale;
3684 if (Scale < 0)
3685 Scale = -Scale;
3686 switch (Scale) {
3687 case 0: // No scale reg, "r+i", "r", or just "i".
3688 break;
3689 default: // No scaled addressing mode.
3690 return false;
3691 }
3692 return true;
3693}
3694
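// Editor's note (a worked example, not in the upstream source): for an i32
// access (ABI alignment 4), AM.BaseOffs must be a multiple of 4 and
// (BaseOffs >> 2) must fit in a signed 11-bit field, so the legal offsets
// are -4096 .. 4092 in steps of 4, matching the "r + #s11:2" addressing
// mode. Any mode with a scaled index register (Scale != 0) is rejected.
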
3695/// Return true if folding a constant offset with the given GlobalAddress is
3696/// legal. It is frequently not legal in PIC relocation models.
3697 bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
3698 const {
3699 return HTM.getRelocationModel() == Reloc::Static;
3700}
3701
3702/// isLegalICmpImmediate - Return true if the specified immediate is legal
3703/// icmp immediate, that is the target has icmp instructions which can compare
3704/// a register against the immediate without having to materialize the
3705/// immediate into a register.
3706 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
3707 return Imm >= -512 && Imm <= 511;
3708}
3709
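// Editor's note (not in the upstream source): the -512..511 range matches
// the signed 10-bit (#s10) immediate field of Hexagon's compare
// instructions; e.g. "p0 = cmp.gt(r0, #511)" encodes directly, while a
// comparison against 512 needs the constant materialized in a register
// first (illustrative).
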
3710/// IsEligibleForTailCallOptimization - Check whether the call is eligible
3711/// for tail call optimization. Targets which want to do tail call
3712/// optimization should implement this function.
3713 bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
3714 SDValue Callee,
3715 CallingConv::ID CalleeCC,
3716 bool IsVarArg,
3717 bool IsCalleeStructRet,
3718 bool IsCallerStructRet,
3719 const SmallVectorImpl<ISD::OutputArg> &Outs,
3720 const SmallVectorImpl<SDValue> &OutVals,
3721 const SmallVectorImpl<ISD::InputArg> &Ins,
3722 SelectionDAG& DAG) const {
3723 const Function &CallerF = DAG.getMachineFunction().getFunction();
3724 CallingConv::ID CallerCC = CallerF.getCallingConv();
3725 bool CCMatch = CallerCC == CalleeCC;
3726
3727 // ***************************************************************************
3728 // Look for obvious safe cases to perform tail call optimization that do not
3729 // require ABI changes.
3730 // ***************************************************************************
3731
3732 // If this is a tail call via a function pointer, then don't do it!
3733 if (!isa<GlobalAddressSDNode>(Callee) &&
3734 !isa<ExternalSymbolSDNode>(Callee)) {
3735 return false;
3736 }
3737
3738 // Do not optimize if the calling conventions do not match and the conventions
3739 // used are not C or Fast.
3740 if (!CCMatch) {
3741 bool R = (CallerCC == CallingConv::C || CallerCC == CallingConv::Fast);
3742 bool E = (CalleeCC == CallingConv::C || CalleeCC == CallingConv::Fast);
3743 // If R & E, then ok.
3744 if (!R || !E)
3745 return false;
3746 }
3747
3748 // Do not tail call optimize vararg calls.
3749 if (IsVarArg)
3750 return false;
3751
3752 // Also avoid tail call optimization if either caller or callee uses struct
3753 // return semantics.
3754 if (IsCalleeStructRet || IsCallerStructRet)
3755 return false;
3756
3757 // In addition to the cases above, we also disable tail call optimization
3758 // if the calling convention requires at least one outgoing argument to be
3759 // passed on the stack. We cannot check that here because at this point
3760 // that information is not available.
3761 return true;
3762}
3763
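// Editor's note (an illustration, not in the upstream source): a direct
// call where caller and callee both use the C calling convention, with no
// varargs and no sret argument, passes every check above. The same call
// made through a function pointer fails the first test, because the callee
// is then neither a GlobalAddress nor an ExternalSymbol node.
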
3764/// Returns the target specific optimal type for load and store operations as
3765/// a result of memset, memcpy, and memmove lowering.
3766///
3767 /// If DstAlign is zero, the destination alignment can satisfy any
3768 /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
3769 /// against an alignment requirement, probably because the source does not
3770 /// need to be loaded. If 'IsMemset' is true, this is expanding a memset. If
3771 /// 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc' indicates
3772 /// whether the memcpy source is constant, so it does not need to be loaded.
3773 /// Returns MVT::Other if the type should be determined using generic
3774 /// target-independent logic.
3775 EVT HexagonTargetLowering::getOptimalMemOpType(
3776 const MemOp &Op, const AttributeList &FuncAttributes) const {
3777 if (Op.size() >= 8 && Op.isAligned(Align(8)))
3778 return MVT::i64;
3779 if (Op.size() >= 4 && Op.isAligned(Align(4)))
3780 return MVT::i32;
3781 if (Op.size() >= 2 && Op.isAligned(Align(2)))
3782 return MVT::i16;
3783 return MVT::Other;
3784}
3785
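// Editor's note (a worked example, not in the upstream source): a 32-byte
// memcpy whose operands are 8-byte aligned selects i64, turning the copy
// into four i64 load/store pairs, within the default max-store-memcpy
// budget of 6; a 6-byte, 2-aligned copy would instead use i16 units.
// Returning MVT::Other defers the choice to generic logic.
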
3786 bool HexagonTargetLowering::allowsMemoryAccess(
3787 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
3788 Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
3789 MVT SVT = VT.getSimpleVT();
3790 if (Subtarget.isHVXVectorType(SVT, true))
3791 return allowsHvxMemoryAccess(SVT, Flags, Fast);
3792 return TargetLoweringBase::allowsMemoryAccess(
3793 Context, DL, VT, AddrSpace, Alignment, Flags, Fast);
3794}
3795
3796 bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
3797 EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
3798 unsigned *Fast) const {
3799 MVT SVT = VT.getSimpleVT();
3800 if (Subtarget.isHVXVectorType(SVT, true))
3801 return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
3802 if (Fast)
3803 *Fast = 0;
3804 return false;
3805}
3806
3807std::pair<const TargetRegisterClass*, uint8_t>
3808HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
3809 MVT VT) const {
3810 if (Subtarget.isHVXVectorType(VT, true)) {
3811 unsigned BitWidth = VT.getSizeInBits();
3812 unsigned VecWidth = Subtarget.getVectorLength() * 8;
3813
3814 if (VT.getVectorElementType() == MVT::i1)
3815 return std::make_pair(&Hexagon::HvxQRRegClass, 1);
3816 if (BitWidth == VecWidth)
3817 return std::make_pair(&Hexagon::HvxVRRegClass, 1);
3818 assert(BitWidth == 2 * VecWidth);
3819 return std::make_pair(&Hexagon::HvxWRRegClass, 1);
3820 }
3821
3822 return TargetLowering::findRepresentativeClass(TRI, VT);
3823 }
3824
3825 bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
3826 ISD::LoadExtType ExtTy, EVT NewVT) const {
3827 // TODO: This may be worth removing. Check regression tests for diffs.
3828 if (!TargetLoweringBase::shouldReduceLoadWidth(Load, ExtTy, NewVT))
3829 return false;
3830
3831 auto *L = cast<LoadSDNode>(Load);
3832 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr());
3833 // Small-data object, do not shrink.
3834 if (BO.first.getOpcode() == HexagonISD::CONST32_GP)
3835 return false;
3836 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(BO.first)) {
3837 auto &HTM = static_cast<const HexagonTargetMachine&>(getTargetMachine());
3838 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal());
3839 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM);
3840 }
3841 return true;
3842}
3843
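// Editor's note (not in the upstream source): stated concretely, a load
// whose base resolves to a global in the GP-relative small-data section
// (a CONST32_GP base, or a GlobalAddress whose object is in small data)
// keeps its original width; the full-width GP-relative access is already a
// single instruction, so narrowing it is presumed to buy nothing.
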
3844 void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3845 SDNode *Node) const {
3846 AdjustHvxInstrPostInstrSelection(MI, Node);
3847}
3848
3849 Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
3850 Type *ValueTy, Value *Addr,
3851 AtomicOrdering Ord) const {
3852 BasicBlock *BB = Builder.GetInsertBlock();
3853 Module *M = BB->getParent()->getParent();
3854 unsigned SZ = ValueTy->getPrimitiveSizeInBits();
3855 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
3856 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
3857 : Intrinsic::hexagon_L4_loadd_locked;
3858 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3859
3860 Value *Call = Builder.CreateCall(Fn, Addr, "larx");
3861
3862 return Builder.CreateBitCast(Call, ValueTy);
3863}
3864
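// Editor's note (illustrative IR, not in the upstream source): for a
// 32-bit atomic this hook emits a single intrinsic call,
//
//   %larx = call i32 @llvm.hexagon.L2.loadw.locked(ptr %addr)
//
// with @llvm.hexagon.L4.loadd.locked as the 64-bit variant; the trailing
// bitcast only matters when ValueTy is a non-integer type of that width.
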
3865/// Perform a store-conditional operation to Addr. Return the status of the
3866/// store. This should be 0 if the store succeeded, non-zero otherwise.
3867 Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
3868 Value *Val, Value *Addr,
3869 AtomicOrdering Ord) const {
3870 BasicBlock *BB = Builder.GetInsertBlock();
3871 Module *M = BB->getParent()->getParent();
3872 Type *Ty = Val->getType();
3873 unsigned SZ = Ty->getPrimitiveSizeInBits();
3874
3875 Type *CastTy = Builder.getIntNTy(SZ);
3876 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
3877 Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
3878 : Intrinsic::hexagon_S4_stored_locked;
3879 Function *Fn = Intrinsic::getDeclaration(M, IntID);
3880
3881 Val = Builder.CreateBitCast(Val, CastTy);
3882
3883 Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
3884 Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
3885 Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
3886 return Ext;
3887}
3888
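// Editor's note (not in the upstream source): the hook's contract above
// wants 0 on success, while the Hexagon store-locked intrinsics produce a
// non-zero predicate on success; the icmp-eq-0 + zext sequence performs
// that inversion. Illustrative IR:
//
//   %stcx = call i32 @llvm.hexagon.S2.storew.locked(ptr %addr, i32 %val)
//   %cmp  = icmp eq i32 %stcx, 0
//   %fail = zext i1 %cmp to i32        ; 0 = success, 1 = failure
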
3889 TargetLowering::AtomicExpansionKind
3890 HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
3891 // Do not expand loads and stores that don't exceed 64 bits.
3892 return LI->getType()->getPrimitiveSizeInBits() > 64
3893 ? AtomicExpansionKind::LLOnly
3894 : AtomicExpansionKind::None;
3895 }
3896
3897 TargetLowering::AtomicExpansionKind
3898 HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
3899 // Do not expand loads and stores that don't exceed 64 bits.
3900 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
3901 ? AtomicExpansionKind::Expand
3902 : AtomicExpansionKind::None;
3903 }
3904
3905 TargetLowering::AtomicExpansionKind
3906 HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
3907 AtomicCmpXchgInst *AI) const {
3908 return AtomicExpansionKind::LLSC;
3909 }
unsigned const MachineRegisterInfo * MRI
unsigned RegSize
aarch64 promote const
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const LLT S8
This file implements a class to represent arbitrary precision integral constant values and operations...
Function Alias Analysis Results
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
#define NL
uint64_t Addr
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< int > MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memcpy"))
static Value * getUnderLyingObjectForBrevLdIntr(Value *V)
static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
static cl::opt< bool > AlignLoads("hexagon-align-loads", cl::Hidden, cl::init(false), cl::desc("Rewrite unaligned loads as a pair of aligned loads"))
static bool isBrevLdIntrinsic(const Value *Inst)
static cl::opt< int > MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memmove"))
static cl::opt< int > MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6), cl::desc("Max #stores to inline memmove"))
static Value * getBrevLdObject(Value *V)
static cl::opt< int > MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden, cl::init(false), cl::desc("Disable minimum alignment of 1 for " "arguments passed by value on stack"))
static Value * returnEdge(const PHINode *PN, Value *IntrBaseVal)
static cl::opt< int > MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memcpy"))
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
static cl::opt< int > MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4), cl::desc("Max #stores to inline memset"))
static cl::opt< bool > EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden, cl::desc("Control jump table emission on Hexagon target"))
static cl::opt< int > MinimumJumpTables("minimum-jump-tables", cl::Hidden, cl::init(5), cl::desc("Set minimum jump tables"))
static cl::opt< bool > EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden, cl::desc("Enable Hexagon SDNode scheduling"))
static cl::opt< bool > EnableFastMath("ffast-math", cl::Hidden, cl::desc("Enable Fast Math processing"))
#define Hexagon_PointerSize
#define HEXAGON_LRFP_SIZE
#define HEXAGON_GOT_SYM_NAME
IRTranslator LLVM IR MI
#define RegName(no)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
#define G(x, y, z)
Definition: MD5.cpp:56
#define H(x, y, z)
Definition: MD5.cpp:57
std::pair< MCSymbol *, MachineModuleInfoImpl::StubValueTy > PairTy
unsigned const TargetRegisterInfo * TRI
#define T1
Module.h This file contains the declarations for the Module class.
LLVMContext & Context
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
const char LLVMTargetMachineRef TM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static llvm::Type * getVectorElementType(llvm::Type *Ty)
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1513
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
Definition: ArrayRef.h:228
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
const T * data() const
Definition: ArrayRef.h:162
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:539
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:205
The address of a basic block.
Definition: Constants.h:888
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
CCValAssign - Represent assignment of one arg/retval to a location.
bool isRegLoc() const
Register getLocReg() const
LocInfo getLocInfo() const
bool isMemLoc() const
int64_t getLocMemOffset() const
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1561
This class represents a function call, abstracting a target machine's calling convention.
bool isTailCall() const
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:267
const APFloat & getValueAPF() const
Definition: Constants.h:310
This is the shared class of boolean and integer constants.
Definition: Constants.h:79
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition: Constants.h:204
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:144
MachineConstantPoolValue * getMachineCPVal() const
bool isMachineConstantPoolEntry() const
const Constant * getConstVal() const
int64_t getSExtValue() const
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1398
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
This is the base abstract class for diagnostic reporting in the backend.
Interface for custom diagnostic printing.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:142
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:680
bool empty() const
Definition: Function.h:804
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition: Function.h:200
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:262
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
Definition: Function.h:661
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:655
const GlobalObject * getAliaseeObject() const
Definition: Globals.cpp:368
bool isValidAutoIncImm(const EVT VT, const int Offset) const
Hexagon target-specific information for each MachineFunction.
Register getFrameRegister(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const HexagonInstrInfo * getInstrInfo() const override
const HexagonFrameLowering * getFrameLowering() const override
const HexagonRegisterInfo * getRegisterInfo() const override
bool isHVXVectorType(EVT VecTy, bool IncludeBool=false) const
Align getTypeAlignment(MVT Ty) const
unsigned getVectorLength() const
bool isEnvironmentMusl() const
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
bool isTargetCanonicalConstantNode(SDValue Op) const override
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const
AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, SDValue InGlue, EVT PtrVT, unsigned ReturnReg, unsigned char OperandGlues) const
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
SDValue LowerCallResult(SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals, const SmallVectorImpl< SDValue > &OutVals, SDValue Callee) const
LowerCallResult - Lower the result values of an ISD::CALL into the appropriate copies out of appropri...
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Return true if the target supports a memory access of this type for the given address space and align...
SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool isShuffleMaskLegal(ArrayRef< int > Mask, EVT VT) const override
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
LegalizeAction getCustomOperationAction(SDNode &Op) const override
How to legalize this custom operation?
SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) const
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerUAddSubOCarry(SDValue Op, SelectionDAG &DAG) const
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
LowerCall - Functions arguments are copied from virtual regs to (physical regs)/(stack frame),...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const
SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool hasBitTest(SDValue X, SDValue Y) const override
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
HexagonTargetLowering(const TargetMachine &TM, const HexagonSubtarget &ST)
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool IsEligibleForTailCallOptimization(SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SmallVectorImpl< ISD::InputArg > &Ins, SelectionDAG &DAG) const
IsEligibleForTailCallOptimization - Check whether the call is eligible for tail call optimization.
SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &, EVT) const override
Return true if an FMA operation is faster than a pair of mul and add instructions.
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const
LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const
HexagonTargetObjectFile * getObjFileLowering() const override
bool isGlobalInSmallSection(const GlobalObject *GO, const TargetMachine &TM) const
Return true if this global value should be placed into small data/bss section.
Common base class shared among various IRBuilders.
Definition: IRBuilder.h:94
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition: IRBuilder.h:533
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
Definition: IRBuilder.h:470
BasicBlock * GetInsertBlock() const
Definition: IRBuilder.h:174
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition: IRBuilder.h:480
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition: IRBuilder.h:2224
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:2110
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition: IRBuilder.h:2010
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition: IRBuilder.h:2395
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2649
const BasicBlock * getParent() const
Definition: Instruction.h:151
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Base class for LoadSDNode and StoreSDNode.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc,...
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
Definition: Instructions.h:184
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Machine Value Type.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
SimpleValueType SimpleTy
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:585
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
unsigned getNumFixedObjects() const
Return the number of fixed objects.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Representation of each machine instruction.
Definition: MachineInstr.h:69
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
Align getAlign() const
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Definition: ArrayRef.h:307
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Definition: Operator.h:41
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:225
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:722
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const TargetSubtargetInfo & getSubtarget() const
Definition: SelectionDAG.h:474
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
Definition: SelectionDAG.h:732
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
Definition: SelectionDAG.h:472
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
Definition: SelectionDAG.h:773
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:768
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:469
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
Definition: SelectionDAG.h:799
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
Definition: SelectionDAG.h:485
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
Definition: SelectionDAG.h:739
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
Definition: SelectionDAG.h:554
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
static void commuteMask(MutableArrayRef< int > Mask)
Change values in a shuffle permute mask assuming the two vector operands have swapped position.
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:76
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
unsigned getID() const
Return the register class ID number.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition: TypeSize.h:330
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:302
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:660
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const CustomOperand< const MCSubtargetInfo & > Msg[]
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
@ MO_PCREL
MO_PCREL - On a symbol operand, indicates a PC-relative relocation used for computing a global address for PIC compilations.
@ MO_GOT
MO_GOT - Indicates a GOT-relative relocation.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition: ISDOpcodes.h:750
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
Definition: ISDOpcodes.h:1126
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
Definition: ISDOpcodes.h:1122
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
Definition: ISDOpcodes.h:250
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
Definition: ISDOpcodes.h:559
@ BSWAP
Byte Swap and Counting operators.
Definition: ISDOpcodes.h:714
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
Definition: ISDOpcodes.h:1155
@ ConstantFP
Definition: ISDOpcodes.h:77
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:239
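ISD opcodes such as these name abstract SelectionDAG operations; nodes are built with SelectionDAG::getNode. A fragment, assuming SelectionDAG &DAG, an SDLoc dl, and two i32 SDValues A and B are in scope:
SDValue Sum  = DAG.getNode(ISD::ADD, dl, MVT::i32, A, B);
SDValue Wide = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Sum);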
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:1031
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:783
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition: ISDOpcodes.h:483
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
Definition: ISDOpcodes.h:199
@ RETURNADDR
Definition: ISDOpcodes.h:95
@ GlobalAddress
Definition: ISDOpcodes.h:78
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
Definition: ISDOpcodes.h:790
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.
Definition: ISDOpcodes.h:543
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:390
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition: ISDOpcodes.h:688
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Definition: ISDOpcodes.h:1233
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition: ISDOpcodes.h:255
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16-bit) floating-point numbers.
Definition: ISDOpcodes.h:913
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition: ISDOpcodes.h:903
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition: ISDOpcodes.h:229
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
Definition: ISDOpcodes.h:939
@ GlobalTLSAddress
Definition: ISDOpcodes.h:79
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
Definition: ISDOpcodes.h:135
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:774
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
Definition: ISDOpcodes.h:620
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
Definition: ISDOpcodes.h:1221
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
Definition: ISDOpcodes.h:988
@ FNEG
Perform various unary floating-point operations inspired by libm.
Definition: ISDOpcodes.h:930
@ BR_CC
BR_CC - Conditional branch.
Definition: ISDOpcodes.h:1077
@ SSUBO
Like [SU]ADDO below, but for subtraction.
Definition: ISDOpcodes.h:327
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1056
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition: ISDOpcodes.h:727
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:627
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Definition: ISDOpcodes.h:1151
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:323
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:705
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition: ISDOpcodes.h:600
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition: ISDOpcodes.h:573
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition: ISDOpcodes.h:535
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:780
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition: ISDOpcodes.h:742
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:971
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
Definition: ISDOpcodes.h:1041
@ ConstantPool
Definition: ISDOpcodes.h:82
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register).
Definition: ISDOpcodes.h:798
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:674
@ GLOBAL_OFFSET_TABLE
The address of the GOT.
Definition: ISDOpcodes.h:87
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
Definition: ISDOpcodes.h:736
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:303
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
Definition: ISDOpcodes.h:1097
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition: ISDOpcodes.h:94
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition: ISDOpcodes.h:836
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
Definition: ISDOpcodes.h:1182
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:680
@ TRAP
TRAP - Trapping instruction.
Definition: ISDOpcodes.h:1208
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
Definition: ISDOpcodes.h:184
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:524
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
Definition: ISDOpcodes.h:52
@ INLINEASM
INLINEASM - Represents an inline asm block.
Definition: ISDOpcodes.h:1094
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:786
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
Definition: ISDOpcodes.h:1146
@ BlockAddress
Definition: ISDOpcodes.h:84
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition: ISDOpcodes.h:763
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
Definition: ISDOpcodes.h:61
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:493
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
Definition: ISDOpcodes.h:192
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
Definition: ISDOpcodes.h:515
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1472
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1523
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1503
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1451
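A sketch of how such a declaration is materialized and then called through IRBuilder; the wrapper name emitY2Dczeroa is hypothetical, and the Hexagon intrinsic is chosen only as an illustration of a non-overloaded case (so no Tys argument is needed).
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"
using namespace llvm;
// Hypothetical helper: declare-or-find the intrinsic, then call it.
static Value *emitY2Dczeroa(IRBuilder<> &B, Module *M, Value *Addr) {
  Function *F =
      Intrinsic::getDeclaration(M, Intrinsic::hexagon_Y2_dczeroa);
  return B.CreateCall(F, {Addr});
}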
@ GeneralDynamic
Definition: CodeGen.h:46
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
constexpr double e
Definition: MathExtras.h:31
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1731
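A self-contained sketch of the range form (the predicate allNonNegative is hypothetical):
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
// Range-based helper: no explicit begin()/end() pair needed.
static bool allNonNegative(const SmallVectorImpl<int> &Vals) {
  return all_of(Vals, [](int V) { return V >= 0; });
}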
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1689
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
int countr_zero(T Val)
Count the number of 0s from the least significant bit to the most significant bit, stopping at the first 1.
Definition: bit.h:215
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:264
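The three helpers above (isAligned, countr_zero, isPowerOf2_32) form the usual bit-math toolkit for alignment reasoning. A sketch with a hypothetical check:
#include "llvm/ADT/bit.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
// Hypothetical: offset must be 8-byte aligned; width a power of two <= 8.
static bool usable(uint64_t Offset, uint32_t Width) {
  bool OffsetOk = isAligned(Align(8), Offset); // Offset % 8 == 0
  bool WidthOk  = isPowerOf2_32(Width);        // exactly one bit set, > 0
  // For a power of two, countr_zero yields its log2.
  return OffsetOk && WidthOk && countr_zero(Width) <= 3;
}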
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
FormattedNumber format_hex(uint64_t N, unsigned Width, bool Upper=false)
format_hex - Output N as a fixed width hexadecimal.
Definition: Format.h:187
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
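A sketch combining the streams above with fixed-width hex formatting; the function dumpMask is hypothetical, and LLVM_DEBUG requires a DEBUG_TYPE definition as shown:
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "hexagon-lowering"
// Hypothetical: trace a mask in debug builds, warn on stderr if empty.
static void dumpMask(uint64_t Mask) {
  LLVM_DEBUG(llvm::dbgs() << "mask=" << llvm::format_hex(Mask, 10) << '\n');
  if (Mask == 0)
    llvm::errs() << "warning: empty mask\n";
}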
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
int getNextAvailablePluginDiagnosticKind()
Get the next available kind ID for a plugin diagnostic.
unsigned M0(unsigned Val)
Definition: VE.h:375
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
@ DS_Remark
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1758
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition: Alignment.h:208
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
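Because Align always holds a non-zero power of two, value() and Log2 convert safely between the byte count and the shift amount. A small sketch (shiftFor is hypothetical):
#include "llvm/Support/Alignment.h"
using namespace llvm;
static unsigned shiftFor(Align A) {
  uint64_t Bytes = A.value(); // e.g. Align(16).value() == 16
  (void)Bytes;
  return Log2(A);             // e.g. Log2(Align(16)) == 4
}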
Extended Value Type.
Definition: ValueTypes.h:34
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition: ValueTypes.h:274
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:358
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:628
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:318
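A sketch tying these EVT queries together; the predicate widerThanWord is hypothetical and assumes Ty is a first-class IR type that EVT::getEVT can handle:
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Type.h"
using namespace llvm;
// Hypothetical: does Ty's (element) type need more than 32 integer bits?
static bool widerThanWord(Type *Ty) {
  EVT VT = EVT::getEVT(Ty);
  EVT Elem = VT.isVector() ? VT.getVectorElementType() : VT;
  return Elem.isSimple() && Elem.getSimpleVT().isInteger() &&
         Elem.bitsGT(MVT::i32);
}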
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
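A fragment showing the typical use: tagging a stack store with pointer info so later analyses can reason about aliasing. Assumes SelectionDAG &DAG, SDLoc dl, SDValues Chain, Val, and FIPtr, and a frame index FI are in scope:
SDValue Store = DAG.getStore(
    Chain, dl, Val, FIPtr,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));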
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
unsigned int NumVTs
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg. If BaseGV is null, there is no BaseGV; if BaseOffs is zero, there is no base offset; if HasBaseReg is false, there is no base register; if Scale is zero, there is no ScaleReg.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
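A fragment showing how these three vectors are consumed when lowering a call. Assumes CLI is a TargetLowering::CallLoweringInfo whose Outs and OutVals have already been populated by argument analysis:
for (unsigned I = 0, E = CLI.Outs.size(); I != E; ++I) {
  const ISD::OutputArg &ArgFlags = CLI.Outs[I];
  SDValue ArgVal = CLI.OutVals[I];
  // ... place ArgVal in the register or stack slot chosen for it,
  // consulting ArgFlags (sext/zext, byval, etc.).
  (void)ArgFlags; (void)ArgVal;
}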