Bug Summary

File: lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
Warning: line 1123, column 42
The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'
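
In C++, shifting a uint32_t by 32 or more bits is undefined behavior. On the path below the analyzer assumes the AND mask is 0, so countTrailingZeros(Mask) returns the full bit width (32) and the subsequent 'Mask >> TZ' at line 1123 shifts by 32. A minimal sketch of the hazard and one possible guard, using LLVM's MathExtras helpers (an illustration only, not the upstream fix; splitMask is a hypothetical helper name):

    #include <cstdint>
    #include "llvm/Support/MathExtras.h" // countTrailingZeros, countTrailingOnes

    // Mirrors the mask analysis in ppAddrRewriteAndSrl (sketch only).
    static bool splitMask(uint32_t Mask, uint32_t &TZ, uint32_t &M1) {
      TZ = llvm::countTrailingZeros(Mask);      // returns 32 when Mask == 0
      if (TZ >= 32)                             // Mask == 0: bail out before shifting
        return false;
      M1 = llvm::countTrailingOnes(Mask >> TZ); // shift amount is now < 32
      return true;
    }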

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name HexagonISelDAGToDAG.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-8/lib/clang/8.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Hexagon -I /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/include -I /build/llvm-toolchain-snapshot-8~svn345461/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/8.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-8/lib/clang/8.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-8~svn345461/build-llvm/lib/Target/Hexagon -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-10-27-211344-32123-1 -x c++ /build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp -faddrsig

/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp

1//===-- HexagonISelDAGToDAG.cpp - A dag to dag inst selector for Hexagon --===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines an instruction selector for the Hexagon target.
11//
12//===----------------------------------------------------------------------===//
13
14#include "Hexagon.h"
15#include "HexagonISelDAGToDAG.h"
16#include "HexagonISelLowering.h"
17#include "HexagonMachineFunctionInfo.h"
18#include "HexagonTargetMachine.h"
19#include "llvm/CodeGen/FunctionLoweringInfo.h"
20#include "llvm/CodeGen/MachineInstrBuilder.h"
21#include "llvm/CodeGen/SelectionDAGISel.h"
22#include "llvm/IR/Intrinsics.h"
23#include "llvm/Support/CommandLine.h"
24#include "llvm/Support/Debug.h"
25using namespace llvm;
26
27#define DEBUG_TYPE "hexagon-isel"
28
29static
30cl::opt<bool>
31EnableAddressRebalancing("isel-rebalance-addr", cl::Hidden, cl::init(true),
32 cl::desc("Rebalance address calculation trees to improve "
33 "instruction selection"));
34
35// Rebalance only if this allows e.g. combining a GA with an offset or
36// factoring out a shift.
37static
38cl::opt<bool>
39RebalanceOnlyForOptimizations("rebalance-only-opt", cl::Hidden, cl::init(false),
40 cl::desc("Rebalance address tree only if this allows optimizations"));
41
42static
43cl::opt<bool>
44RebalanceOnlyImbalancedTrees("rebalance-only-imbal", cl::Hidden,
45 cl::init(false), cl::desc("Rebalance address tree only if it is imbalanced"));
46
47static cl::opt<bool> CheckSingleUse("hexagon-isel-su", cl::Hidden,
48 cl::init(true), cl::desc("Enable checking of SDNode's single-use status"));
49
50//===----------------------------------------------------------------------===//
51// Instruction Selector Implementation
52//===----------------------------------------------------------------------===//
53
54#define GET_DAGISEL_BODY HexagonDAGToDAGISel
55#include "HexagonGenDAGISel.inc"
56
57/// createHexagonISelDag - This pass converts a legalized DAG into a
58/// Hexagon-specific DAG, ready for instruction scheduling.
59///
60namespace llvm {
61FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
62 CodeGenOpt::Level OptLevel) {
63 return new HexagonDAGToDAGISel(TM, OptLevel);
64}
65}
66
67void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
68 SDValue Chain = LD->getChain();
69 SDValue Base = LD->getBasePtr();
70 SDValue Offset = LD->getOffset();
71 int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
72 EVT LoadedVT = LD->getMemoryVT();
73 unsigned Opcode = 0;
74
75 // Check for zero extended loads. Treat any-extend loads as zero extended
76 // loads.
77 ISD::LoadExtType ExtType = LD->getExtensionType();
78 bool IsZeroExt = (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD);
79 bool IsValidInc = HII->isValidAutoIncImm(LoadedVT, Inc);
80
81 assert(LoadedVT.isSimple());
82 switch (LoadedVT.getSimpleVT().SimpleTy) {
83 case MVT::i8:
84 if (IsZeroExt)
85 Opcode = IsValidInc ? Hexagon::L2_loadrub_pi : Hexagon::L2_loadrub_io;
86 else
87 Opcode = IsValidInc ? Hexagon::L2_loadrb_pi : Hexagon::L2_loadrb_io;
88 break;
89 case MVT::i16:
90 if (IsZeroExt)
91 Opcode = IsValidInc ? Hexagon::L2_loadruh_pi : Hexagon::L2_loadruh_io;
92 else
93 Opcode = IsValidInc ? Hexagon::L2_loadrh_pi : Hexagon::L2_loadrh_io;
94 break;
95 case MVT::i32:
96 case MVT::f32:
97 case MVT::v2i16:
98 case MVT::v4i8:
99 Opcode = IsValidInc ? Hexagon::L2_loadri_pi : Hexagon::L2_loadri_io;
100 break;
101 case MVT::i64:
102 case MVT::f64:
103 case MVT::v2i32:
104 case MVT::v4i16:
105 case MVT::v8i8:
106 Opcode = IsValidInc ? Hexagon::L2_loadrd_pi : Hexagon::L2_loadrd_io;
107 break;
108 case MVT::v64i8:
109 case MVT::v32i16:
110 case MVT::v16i32:
111 case MVT::v8i64:
112 case MVT::v128i8:
113 case MVT::v64i16:
114 case MVT::v32i32:
115 case MVT::v16i64:
116 if (isAlignedMemNode(LD)) {
117 if (LD->isNonTemporal())
118 Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi : Hexagon::V6_vL32b_nt_ai;
119 else
120 Opcode = IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
121 } else {
122 Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
123 }
124 break;
125 default:
126 llvm_unreachable("Unexpected memory type in indexed load");
127 }
128
129 SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
130 MachineMemOperand *MemOp = LD->getMemOperand();
131
132 auto getExt64 = [this,ExtType] (MachineSDNode *N, const SDLoc &dl)
133 -> MachineSDNode* {
134 if (ExtType == ISD::ZEXTLOAD || ExtType == ISD::EXTLOAD) {
135 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
136 return CurDAG->getMachineNode(Hexagon::A4_combineir, dl, MVT::i64,
137 Zero, SDValue(N, 0));
138 }
139 if (ExtType == ISD::SEXTLOAD)
140 return CurDAG->getMachineNode(Hexagon::A2_sxtw, dl, MVT::i64,
141 SDValue(N, 0));
142 return N;
143 };
144
145 // Loaded value Next address Chain
146 SDValue From[3] = { SDValue(LD,0), SDValue(LD,1), SDValue(LD,2) };
147 SDValue To[3];
148
149 EVT ValueVT = LD->getValueType(0);
150 if (ValueVT == MVT::i64 && ExtType != ISD::NON_EXTLOAD) {
151 // A load extending to i64 will actually produce i32, which will then
152 // need to be extended to i64.
153 assert(LoadedVT.getSizeInBits() <= 32)((LoadedVT.getSizeInBits() <= 32) ? static_cast<void>
(0) : __assert_fail ("LoadedVT.getSizeInBits() <= 32", "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp"
, 153, __PRETTY_FUNCTION__))
;
154 ValueVT = MVT::i32;
155 }
156
157 if (IsValidInc) {
158 MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT,
159 MVT::i32, MVT::Other, Base,
160 IncV, Chain);
161 CurDAG->setNodeMemRefs(L, {MemOp});
162 To[1] = SDValue(L, 1); // Next address.
163 To[2] = SDValue(L, 2); // Chain.
164 // Handle special case for extension to i64.
165 if (LD->getValueType(0) == MVT::i64)
166 L = getExt64(L, dl);
167 To[0] = SDValue(L, 0); // Loaded (extended) value.
168 } else {
169 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
170 MachineSDNode *L = CurDAG->getMachineNode(Opcode, dl, ValueVT, MVT::Other,
171 Base, Zero, Chain);
172 CurDAG->setNodeMemRefs(L, {MemOp});
173 To[2] = SDValue(L, 1); // Chain.
174 MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
175 Base, IncV);
176 To[1] = SDValue(A, 0); // Next address.
177 // Handle special case for extension to i64.
178 if (LD->getValueType(0) == MVT::i64)
179 L = getExt64(L, dl);
180 To[0] = SDValue(L, 0); // Loaded (extended) value.
181 }
182 ReplaceUses(From, To, 3);
183 CurDAG->RemoveDeadNode(LD);
184}
185
186MachineSDNode *HexagonDAGToDAGISel::LoadInstrForLoadIntrinsic(SDNode *IntN) {
187 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
188 return nullptr;
189
190 SDLoc dl(IntN);
191 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
192
193 static std::map<unsigned,unsigned> LoadPciMap = {
194 { Intrinsic::hexagon_circ_ldb, Hexagon::L2_loadrb_pci },
195 { Intrinsic::hexagon_circ_ldub, Hexagon::L2_loadrub_pci },
196 { Intrinsic::hexagon_circ_ldh, Hexagon::L2_loadrh_pci },
197 { Intrinsic::hexagon_circ_lduh, Hexagon::L2_loadruh_pci },
198 { Intrinsic::hexagon_circ_ldw, Hexagon::L2_loadri_pci },
199 { Intrinsic::hexagon_circ_ldd, Hexagon::L2_loadrd_pci },
200 };
201 auto FLC = LoadPciMap.find(IntNo);
202 if (FLC != LoadPciMap.end()) {
203 EVT ValTy = (IntNo == Intrinsic::hexagon_circ_ldd) ? MVT::i64 : MVT::i32;
204 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
205 // Operands: { Base, Increment, Modifier, Chain }
206 auto Inc = cast<ConstantSDNode>(IntN->getOperand(5));
207 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), dl, MVT::i32);
208 MachineSDNode *Res = CurDAG->getMachineNode(FLC->second, dl, RTys,
209 { IntN->getOperand(2), I, IntN->getOperand(4),
210 IntN->getOperand(0) });
211 return Res;
212 }
213
214 return nullptr;
215}
216
217SDNode *HexagonDAGToDAGISel::StoreInstrForLoadIntrinsic(MachineSDNode *LoadN,
218 SDNode *IntN) {
219 // The "LoadN" is just a machine load instruction. The intrinsic also
220 // involves storing it. Generate an appropriate store to the location
221 // given in the intrinsic's operand(3).
222 uint64_t F = HII->get(LoadN->getMachineOpcode()).TSFlags;
223 unsigned SizeBits = (F >> HexagonII::MemAccessSizePos) &
224 HexagonII::MemAccesSizeMask;
225 unsigned Size = 1U << (SizeBits-1);
226
227 SDLoc dl(IntN);
228 MachinePointerInfo PI;
229 SDValue TS;
230 SDValue Loc = IntN->getOperand(3);
231
232 if (Size >= 4)
233 TS = CurDAG->getStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc, PI,
234 Size);
235 else
236 TS = CurDAG->getTruncStore(SDValue(LoadN, 2), dl, SDValue(LoadN, 0), Loc,
237 PI, MVT::getIntegerVT(Size * 8), Size);
238
239 SDNode *StoreN;
240 {
241 HandleSDNode Handle(TS);
242 SelectStore(TS.getNode());
243 StoreN = Handle.getValue().getNode();
244 }
245
246 // Load's results are { Loaded value, Updated pointer, Chain }
247 ReplaceUses(SDValue(IntN, 0), SDValue(LoadN, 1));
248 ReplaceUses(SDValue(IntN, 1), SDValue(StoreN, 0));
249 return StoreN;
250}
251
252bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) {
253 // The intrinsics for load circ/brev perform two operations:
254 // 1. Load a value V from the specified location, using the addressing
255 // mode corresponding to the intrinsic.
256 // 2. Store V into a specified location. This location is typically a
257 // local, temporary object.
258 // In many cases, the program using these intrinsics will immediately
259 // load V again from the local object. In those cases, when certain
260 // conditions are met, the last load can be removed.
261 // This function identifies and optimizes this pattern. If the pattern
262 // cannot be optimized, it returns false, which will cause the load
263 // to be selected separately from the intrinsic (which will be handled
264 // in SelectIntrinsicWChain).
265
266 SDValue Ch = N->getOperand(0);
267 SDValue Loc = N->getOperand(1);
268
269 // Assume that the load and the intrinsic are connected directly with a
270 // chain:
271 // t1: i32,ch = int.load ..., ..., ..., Loc, ... // <-- C
272 // t2: i32,ch = load t1:1, Loc, ...
273 SDNode *C = Ch.getNode();
274
275 if (C->getOpcode() != ISD::INTRINSIC_W_CHAIN)
276 return false;
277
278 // The second load can only be eliminated if its extension type matches
279 // that of the load instruction corresponding to the intrinsic. The user
280 // can provide an address of an unsigned variable to store the result of
281 // a sign-extending intrinsic into (or the other way around).
282 ISD::LoadExtType IntExt;
283 switch (cast<ConstantSDNode>(C->getOperand(1))->getZExtValue()) {
284 case Intrinsic::hexagon_circ_ldub:
285 case Intrinsic::hexagon_circ_lduh:
286 IntExt = ISD::ZEXTLOAD;
287 break;
288 case Intrinsic::hexagon_circ_ldw:
289 case Intrinsic::hexagon_circ_ldd:
290 IntExt = ISD::NON_EXTLOAD;
291 break;
292 default:
293 IntExt = ISD::SEXTLOAD;
294 break;
295 }
296 if (N->getExtensionType() != IntExt)
297 return false;
298
299 // Make sure the target location for the loaded value in the load intrinsic
300 // is the location from which LD (or N) is loading.
301 if (C->getNumOperands() < 4 || Loc.getNode() != C->getOperand(3).getNode())
302 return false;
303
304 if (MachineSDNode *L = LoadInstrForLoadIntrinsic(C)) {
305 SDNode *S = StoreInstrForLoadIntrinsic(L, C);
306 SDValue F[] = { SDValue(N,0), SDValue(N,1), SDValue(C,0), SDValue(C,1) };
307 SDValue T[] = { SDValue(L,0), SDValue(S,0), SDValue(L,1), SDValue(S,0) };
308 ReplaceUses(F, T, array_lengthof(T));
309 // This transformation will leave the intrinsic dead. If it remains in
310 // the DAG, the selection code will see it again, but without the load,
311 // and it will generate a store that is normally required for it.
312 CurDAG->RemoveDeadNode(C);
313 return true;
314 }
315 return false;
316}
317
318// Convert the bit-reverse load intrinsic to appropriate target instruction.
319bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
320 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
321 return false;
322
323 const SDLoc &dl(IntN);
324 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
325
326 static const std::map<unsigned, unsigned> LoadBrevMap = {
327 { Intrinsic::hexagon_L2_loadrb_pbr, Hexagon::L2_loadrb_pbr },
328 { Intrinsic::hexagon_L2_loadrub_pbr, Hexagon::L2_loadrub_pbr },
329 { Intrinsic::hexagon_L2_loadrh_pbr, Hexagon::L2_loadrh_pbr },
330 { Intrinsic::hexagon_L2_loadruh_pbr, Hexagon::L2_loadruh_pbr },
331 { Intrinsic::hexagon_L2_loadri_pbr, Hexagon::L2_loadri_pbr },
332 { Intrinsic::hexagon_L2_loadrd_pbr, Hexagon::L2_loadrd_pbr }
333 };
334 auto FLI = LoadBrevMap.find(IntNo);
335 if (FLI != LoadBrevMap.end()) {
336 EVT ValTy =
337 (IntNo == Intrinsic::hexagon_L2_loadrd_pbr) ? MVT::i64 : MVT::i32;
338 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
339 // Operands of Intrinsic: {chain, enum ID of intrinsic, baseptr,
340 // modifier}.
341 // Operands of target instruction: { Base, Modifier, Chain }.
342 MachineSDNode *Res = CurDAG->getMachineNode(
343 FLI->second, dl, RTys,
344 {IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(0)});
345
346 MachineMemOperand *MemOp = cast<MemIntrinsicSDNode>(IntN)->getMemOperand();
347 CurDAG->setNodeMemRefs(Res, {MemOp});
348
349 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
350 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
351 ReplaceUses(SDValue(IntN, 2), SDValue(Res, 2));
352 CurDAG->RemoveDeadNode(IntN);
353 return true;
354 }
355 return false;
356}
357
358/// Generate a machine instruction node for the new circular buffer intrinsics.
359/// The new versions use a CSx register instead of the K field.
360bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
361 if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
362 return false;
363
364 SDLoc DL(IntN);
365 unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
366 SmallVector<SDValue, 7> Ops;
367
368 static std::map<unsigned,unsigned> LoadNPcMap = {
369 { Intrinsic::hexagon_L2_loadrub_pci, Hexagon::PS_loadrub_pci },
370 { Intrinsic::hexagon_L2_loadrb_pci, Hexagon::PS_loadrb_pci },
371 { Intrinsic::hexagon_L2_loadruh_pci, Hexagon::PS_loadruh_pci },
372 { Intrinsic::hexagon_L2_loadrh_pci, Hexagon::PS_loadrh_pci },
373 { Intrinsic::hexagon_L2_loadri_pci, Hexagon::PS_loadri_pci },
374 { Intrinsic::hexagon_L2_loadrd_pci, Hexagon::PS_loadrd_pci },
375 { Intrinsic::hexagon_L2_loadrub_pcr, Hexagon::PS_loadrub_pcr },
376 { Intrinsic::hexagon_L2_loadrb_pcr, Hexagon::PS_loadrb_pcr },
377 { Intrinsic::hexagon_L2_loadruh_pcr, Hexagon::PS_loadruh_pcr },
378 { Intrinsic::hexagon_L2_loadrh_pcr, Hexagon::PS_loadrh_pcr },
379 { Intrinsic::hexagon_L2_loadri_pcr, Hexagon::PS_loadri_pcr },
380 { Intrinsic::hexagon_L2_loadrd_pcr, Hexagon::PS_loadrd_pcr }
381 };
382 auto FLI = LoadNPcMap.find (IntNo);
383 if (FLI != LoadNPcMap.end()) {
384 EVT ValTy = MVT::i32;
385 if (IntNo == Intrinsic::hexagon_L2_loadrd_pci ||
386 IntNo == Intrinsic::hexagon_L2_loadrd_pcr)
387 ValTy = MVT::i64;
388 EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
389 // Handle load.*_pci case which has 6 operands.
390 if (IntN->getNumOperands() == 6) {
391 auto Inc = cast<ConstantSDNode>(IntN->getOperand(3));
392 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), DL, MVT::i32);
393 // Operands: { Base, Increment, Modifier, Start, Chain }.
394 Ops = { IntN->getOperand(2), I, IntN->getOperand(4), IntN->getOperand(5),
395 IntN->getOperand(0) };
396 } else
397 // Handle load.*_pcr case which has 5 operands.
398 // Operands: { Base, Modifier, Start, Chain }.
399 Ops = { IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(4),
400 IntN->getOperand(0) };
401 MachineSDNode *Res = CurDAG->getMachineNode(FLI->second, DL, RTys, Ops);
402 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
403 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
404 ReplaceUses(SDValue(IntN, 2), SDValue(Res, 2));
405 CurDAG->RemoveDeadNode(IntN);
406 return true;
407 }
408
409 static std::map<unsigned,unsigned> StoreNPcMap = {
410 { Intrinsic::hexagon_S2_storerb_pci, Hexagon::PS_storerb_pci },
411 { Intrinsic::hexagon_S2_storerh_pci, Hexagon::PS_storerh_pci },
412 { Intrinsic::hexagon_S2_storerf_pci, Hexagon::PS_storerf_pci },
413 { Intrinsic::hexagon_S2_storeri_pci, Hexagon::PS_storeri_pci },
414 { Intrinsic::hexagon_S2_storerd_pci, Hexagon::PS_storerd_pci },
415 { Intrinsic::hexagon_S2_storerb_pcr, Hexagon::PS_storerb_pcr },
416 { Intrinsic::hexagon_S2_storerh_pcr, Hexagon::PS_storerh_pcr },
417 { Intrinsic::hexagon_S2_storerf_pcr, Hexagon::PS_storerf_pcr },
418 { Intrinsic::hexagon_S2_storeri_pcr, Hexagon::PS_storeri_pcr },
419 { Intrinsic::hexagon_S2_storerd_pcr, Hexagon::PS_storerd_pcr }
420 };
421 auto FSI = StoreNPcMap.find (IntNo);
422 if (FSI != StoreNPcMap.end()) {
423 EVT RTys[] = { MVT::i32, MVT::Other };
424 // Handle store.*_pci case which has 7 operands.
425 if (IntN->getNumOperands() == 7) {
426 auto Inc = cast<ConstantSDNode>(IntN->getOperand(3));
427 SDValue I = CurDAG->getTargetConstant(Inc->getSExtValue(), DL, MVT::i32);
428 // Operands: { Base, Increment, Modifier, Value, Start, Chain }.
429 Ops = { IntN->getOperand(2), I, IntN->getOperand(4), IntN->getOperand(5),
430 IntN->getOperand(6), IntN->getOperand(0) };
431 } else
432 // Handle store.*_pcr case which has 6 operands.
433 // Operands: { Base, Modifier, Value, Start, Chain }.
434 Ops = { IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(4),
435 IntN->getOperand(5), IntN->getOperand(0) };
436 MachineSDNode *Res = CurDAG->getMachineNode(FSI->second, DL, RTys, Ops);
437 ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
438 ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
439 CurDAG->RemoveDeadNode(IntN);
440 return true;
441 }
442
443 return false;
444}
445
446void HexagonDAGToDAGISel::SelectLoad(SDNode *N) {
447 SDLoc dl(N);
448 LoadSDNode *LD = cast<LoadSDNode>(N);
449
450 // Handle indexed loads.
451 ISD::MemIndexedMode AM = LD->getAddressingMode();
452 if (AM != ISD::UNINDEXED) {
453 SelectIndexedLoad(LD, dl);
454 return;
455 }
456
457 // Handle patterns using circ/brev load intrinsics.
458 if (tryLoadOfLoadIntrinsic(LD))
459 return;
460
461 SelectCode(LD);
462}
463
464void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
465 SDValue Chain = ST->getChain();
466 SDValue Base = ST->getBasePtr();
467 SDValue Offset = ST->getOffset();
468 SDValue Value = ST->getValue();
469 // Get the constant value.
470 int32_t Inc = cast<ConstantSDNode>(Offset.getNode())->getSExtValue();
471 EVT StoredVT = ST->getMemoryVT();
472 EVT ValueVT = Value.getValueType();
473
474 bool IsValidInc = HII->isValidAutoIncImm(StoredVT, Inc);
475 unsigned Opcode = 0;
476
477 assert(StoredVT.isSimple());
478 switch (StoredVT.getSimpleVT().SimpleTy) {
479 case MVT::i8:
480 Opcode = IsValidInc ? Hexagon::S2_storerb_pi : Hexagon::S2_storerb_io;
481 break;
482 case MVT::i16:
483 Opcode = IsValidInc ? Hexagon::S2_storerh_pi : Hexagon::S2_storerh_io;
484 break;
485 case MVT::i32:
486 case MVT::f32:
487 case MVT::v2i16:
488 case MVT::v4i8:
489 Opcode = IsValidInc ? Hexagon::S2_storeri_pi : Hexagon::S2_storeri_io;
490 break;
491 case MVT::i64:
492 case MVT::f64:
493 case MVT::v2i32:
494 case MVT::v4i16:
495 case MVT::v8i8:
496 Opcode = IsValidInc ? Hexagon::S2_storerd_pi : Hexagon::S2_storerd_io;
497 break;
498 case MVT::v64i8:
499 case MVT::v32i16:
500 case MVT::v16i32:
501 case MVT::v8i64:
502 case MVT::v128i8:
503 case MVT::v64i16:
504 case MVT::v32i32:
505 case MVT::v16i64:
506 if (isAlignedMemNode(ST)) {
507 if (ST->isNonTemporal())
508 Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi : Hexagon::V6_vS32b_nt_ai;
509 else
510 Opcode = IsValidInc ? Hexagon::V6_vS32b_pi : Hexagon::V6_vS32b_ai;
511 } else {
512 Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi : Hexagon::V6_vS32Ub_ai;
513 }
514 break;
515 default:
516 llvm_unreachable("Unexpected memory type in indexed store");
517 }
518
519 if (ST->isTruncatingStore() && ValueVT.getSizeInBits() == 64) {
520 assert(StoredVT.getSizeInBits() < 64 && "Not a truncating store")((StoredVT.getSizeInBits() < 64 && "Not a truncating store"
) ? static_cast<void> (0) : __assert_fail ("StoredVT.getSizeInBits() < 64 && \"Not a truncating store\""
, "/build/llvm-toolchain-snapshot-8~svn345461/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp"
, 520, __PRETTY_FUNCTION__))
;
521 Value = CurDAG->getTargetExtractSubreg(Hexagon::isub_lo,
522 dl, MVT::i32, Value);
523 }
524
525 SDValue IncV = CurDAG->getTargetConstant(Inc, dl, MVT::i32);
526 MachineMemOperand *MemOp = ST->getMemOperand();
527
528 // Next address Chain
529 SDValue From[2] = { SDValue(ST,0), SDValue(ST,1) };
530 SDValue To[2];
531
532 if (IsValidInc) {
533 // Build post increment store.
534 SDValue Ops[] = { Base, IncV, Value, Chain };
535 MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::i32, MVT::Other,
536 Ops);
537 CurDAG->setNodeMemRefs(S, {MemOp});
538 To[0] = SDValue(S, 0);
539 To[1] = SDValue(S, 1);
540 } else {
541 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
542 SDValue Ops[] = { Base, Zero, Value, Chain };
543 MachineSDNode *S = CurDAG->getMachineNode(Opcode, dl, MVT::Other, Ops);
544 CurDAG->setNodeMemRefs(S, {MemOp});
545 To[1] = SDValue(S, 0);
546 MachineSDNode *A = CurDAG->getMachineNode(Hexagon::A2_addi, dl, MVT::i32,
547 Base, IncV);
548 To[0] = SDValue(A, 0);
549 }
550
551 ReplaceUses(From, To, 2);
552 CurDAG->RemoveDeadNode(ST);
553}
554
555void HexagonDAGToDAGISel::SelectStore(SDNode *N) {
556 SDLoc dl(N);
557 StoreSDNode *ST = cast<StoreSDNode>(N);
558
559 // Handle indexed stores.
560 ISD::MemIndexedMode AM = ST->getAddressingMode();
561 if (AM != ISD::UNINDEXED) {
562 SelectIndexedStore(ST, dl);
563 return;
564 }
565
566 SelectCode(ST);
567}
568
569void HexagonDAGToDAGISel::SelectSHL(SDNode *N) {
570 SDLoc dl(N);
571 SDValue Shl_0 = N->getOperand(0);
572 SDValue Shl_1 = N->getOperand(1);
573
574 auto Default = [this,N] () -> void { SelectCode(N); };
575
576 if (N->getValueType(0) != MVT::i32 || Shl_1.getOpcode() != ISD::Constant)
577 return Default();
578
579 // RHS is const.
580 int32_t ShlConst = cast<ConstantSDNode>(Shl_1)->getSExtValue();
581
582 if (Shl_0.getOpcode() == ISD::MUL) {
583 SDValue Mul_0 = Shl_0.getOperand(0); // Val
584 SDValue Mul_1 = Shl_0.getOperand(1); // Const
585 // RHS of mul is const.
586 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mul_1)) {
587 int32_t ValConst = C->getSExtValue() << ShlConst;
588 if (isInt<9>(ValConst)) {
589 SDValue Val = CurDAG->getTargetConstant(ValConst, dl, MVT::i32);
590 SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
591 MVT::i32, Mul_0, Val);
592 ReplaceNode(N, Result);
593 return;
594 }
595 }
596 return Default();
597 }
598
599 if (Shl_0.getOpcode() == ISD::SUB) {
600 SDValue Sub_0 = Shl_0.getOperand(0); // Const 0
601 SDValue Sub_1 = Shl_0.getOperand(1); // Val
602 if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Sub_0)) {
603 if (C1->getSExtValue() != 0 || Sub_1.getOpcode() != ISD::SHL)
604 return Default();
605 SDValue Shl2_0 = Sub_1.getOperand(0); // Val
606 SDValue Shl2_1 = Sub_1.getOperand(1); // Const
607 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(Shl2_1)) {
608 int32_t ValConst = 1 << (ShlConst + C2->getSExtValue());
609 if (isInt<9>(-ValConst)) {
610 SDValue Val = CurDAG->getTargetConstant(-ValConst, dl, MVT::i32);
611 SDNode *Result = CurDAG->getMachineNode(Hexagon::M2_mpysmi, dl,
612 MVT::i32, Shl2_0, Val);
613 ReplaceNode(N, Result);
614 return;
615 }
616 }
617 }
618 }
619
620 return Default();
621}
622
623//
624// Handling intrinsics for circular load and bitreverse load.
625//
626void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
627 if (MachineSDNode *L = LoadInstrForLoadIntrinsic(N)) {
628 StoreInstrForLoadIntrinsic(L, N);
629 CurDAG->RemoveDeadNode(N);
630 return;
631 }
632
633 // Handle bit-reverse load intrinsics.
634 if (SelectBrevLdIntrinsic(N))
635 return;
636
637 if (SelectNewCircIntrinsic(N))
638 return;
639
640 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
641 if (IntNo == Intrinsic::hexagon_V6_vgathermw ||
642 IntNo == Intrinsic::hexagon_V6_vgathermw_128B ||
643 IntNo == Intrinsic::hexagon_V6_vgathermh ||
644 IntNo == Intrinsic::hexagon_V6_vgathermh_128B ||
645 IntNo == Intrinsic::hexagon_V6_vgathermhw ||
646 IntNo == Intrinsic::hexagon_V6_vgathermhw_128B) {
647 SelectV65Gather(N);
648 return;
649 }
650 if (IntNo == Intrinsic::hexagon_V6_vgathermwq ||
651 IntNo == Intrinsic::hexagon_V6_vgathermwq_128B ||
652 IntNo == Intrinsic::hexagon_V6_vgathermhq ||
653 IntNo == Intrinsic::hexagon_V6_vgathermhq_128B ||
654 IntNo == Intrinsic::hexagon_V6_vgathermhwq ||
655 IntNo == Intrinsic::hexagon_V6_vgathermhwq_128B) {
656 SelectV65GatherPred(N);
657 return;
658 }
659
660 SelectCode(N);
661}
662
663void HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
664 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
665 unsigned Bits;
666 switch (IID) {
667 case Intrinsic::hexagon_S2_vsplatrb:
668 Bits = 8;
669 break;
670 case Intrinsic::hexagon_S2_vsplatrh:
671 Bits = 16;
672 break;
673 case Intrinsic::hexagon_V6_vaddcarry:
674 case Intrinsic::hexagon_V6_vaddcarry_128B:
675 case Intrinsic::hexagon_V6_vsubcarry:
676 case Intrinsic::hexagon_V6_vsubcarry_128B:
677 SelectHVXDualOutput(N);
678 return;
679 default:
680 SelectCode(N);
681 return;
682 }
683
684 SDValue V = N->getOperand(1);
685 SDValue U;
686 if (keepsLowBits(V, Bits, U)) {
687 SDValue R = CurDAG->getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
688 N->getOperand(0), U);
689 ReplaceNode(N, R.getNode());
690 SelectCode(R.getNode());
691 return;
692 }
693 SelectCode(N);
694}
695
696//
697// Map floating point constant values.
698//
699void HexagonDAGToDAGISel::SelectConstantFP(SDNode *N) {
700 SDLoc dl(N);
701 ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
702 APInt A = CN->getValueAPF().bitcastToAPInt();
703 if (N->getValueType(0) == MVT::f32) {
704 SDValue V = CurDAG->getTargetConstant(A.getZExtValue(), dl, MVT::i32);
705 ReplaceNode(N, CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::f32, V));
706 return;
707 }
708 if (N->getValueType(0) == MVT::f64) {
709 SDValue V = CurDAG->getTargetConstant(A.getZExtValue(), dl, MVT::i64);
710 ReplaceNode(N, CurDAG->getMachineNode(Hexagon::CONST64, dl, MVT::f64, V));
711 return;
712 }
713
714 SelectCode(N);
715}
716
717//
718// Map boolean values.
719//
720void HexagonDAGToDAGISel::SelectConstant(SDNode *N) {
721 if (N->getValueType(0) == MVT::i1) {
722 assert(!(cast<ConstantSDNode>(N)->getZExtValue() >> 1));
723 unsigned Opc = (cast<ConstantSDNode>(N)->getSExtValue() != 0)
724 ? Hexagon::PS_true
725 : Hexagon::PS_false;
726 ReplaceNode(N, CurDAG->getMachineNode(Opc, SDLoc(N), MVT::i1));
727 return;
728 }
729
730 SelectCode(N);
731}
732
733void HexagonDAGToDAGISel::SelectFrameIndex(SDNode *N) {
734 MachineFrameInfo &MFI = MF->getFrameInfo();
735 const HexagonFrameLowering *HFI = HST->getFrameLowering();
736 int FX = cast<FrameIndexSDNode>(N)->getIndex();
737 unsigned StkA = HFI->getStackAlignment();
738 unsigned MaxA = MFI.getMaxAlignment();
739 SDValue FI = CurDAG->getTargetFrameIndex(FX, MVT::i32);
740 SDLoc DL(N);
741 SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
742 SDNode *R = nullptr;
743
744 // Use PS_fi when:
745 // - the object is fixed, or
746 // - there are no objects with higher-than-default alignment, or
747 // - there are no dynamically allocated objects.
748 // Otherwise, use PS_fia.
749 if (FX < 0 || MaxA <= StkA || !MFI.hasVarSizedObjects()) {
750 R = CurDAG->getMachineNode(Hexagon::PS_fi, DL, MVT::i32, FI, Zero);
751 } else {
752 auto &HMFI = *MF->getInfo<HexagonMachineFunctionInfo>();
753 unsigned AR = HMFI.getStackAlignBaseVReg();
754 SDValue CH = CurDAG->getEntryNode();
755 SDValue Ops[] = { CurDAG->getCopyFromReg(CH, DL, AR, MVT::i32), FI, Zero };
756 R = CurDAG->getMachineNode(Hexagon::PS_fia, DL, MVT::i32, Ops);
757 }
758
759 ReplaceNode(N, R);
760}
761
762void HexagonDAGToDAGISel::SelectAddSubCarry(SDNode *N) {
763 unsigned OpcCarry = N->getOpcode() == HexagonISD::ADDC ? Hexagon::A4_addp_c
764 : Hexagon::A4_subp_c;
765 SDNode *C = CurDAG->getMachineNode(OpcCarry, SDLoc(N), N->getVTList(),
766 { N->getOperand(0), N->getOperand(1),
767 N->getOperand(2) });
768 ReplaceNode(N, C);
769}
770
771void HexagonDAGToDAGISel::SelectVAlign(SDNode *N) {
772 MVT ResTy = N->getValueType(0).getSimpleVT();
773 if (HST->isHVXVectorType(ResTy, true))
774 return SelectHvxVAlign(N);
775
776 const SDLoc &dl(N);
777 unsigned VecLen = ResTy.getSizeInBits();
778 if (VecLen == 32) {
779 SDValue Ops[] = {
780 CurDAG->getTargetConstant(Hexagon::DoubleRegsRegClassID, dl, MVT::i32),
781 N->getOperand(0),
782 CurDAG->getTargetConstant(Hexagon::isub_hi, dl, MVT::i32),
783 N->getOperand(1),
784 CurDAG->getTargetConstant(Hexagon::isub_lo, dl, MVT::i32)
785 };
786 SDNode *R = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl,
787 MVT::i64, Ops);
788
789 // Shift right by "(Addr & 0x3) * 8" bytes.
790 SDValue M0 = CurDAG->getTargetConstant(0x18, dl, MVT::i32);
791 SDValue M1 = CurDAG->getTargetConstant(0x03, dl, MVT::i32);
792 SDNode *C = CurDAG->getMachineNode(Hexagon::S4_andi_asl_ri, dl, MVT::i32,
793 M0, N->getOperand(2), M1);
794 SDNode *S = CurDAG->getMachineNode(Hexagon::S2_lsr_r_p, dl, MVT::i64,
795 SDValue(R, 0), SDValue(C, 0));
796 SDValue E = CurDAG->getTargetExtractSubreg(Hexagon::isub_lo, dl, ResTy,
797 SDValue(S, 0));
798 ReplaceNode(N, E.getNode());
799 } else {
800 assert(VecLen == 64);
801 SDNode *Pu = CurDAG->getMachineNode(Hexagon::C2_tfrrp, dl, MVT::v8i1,
802 N->getOperand(2));
803 SDNode *VA = CurDAG->getMachineNode(Hexagon::S2_valignrb, dl, ResTy,
804 N->getOperand(0), N->getOperand(1),
805 SDValue(Pu,0));
806 ReplaceNode(N, VA);
807 }
808}
809
810void HexagonDAGToDAGISel::SelectVAlignAddr(SDNode *N) {
811 const SDLoc &dl(N);
812 SDValue A = N->getOperand(1);
813 int Mask = -cast<ConstantSDNode>(A.getNode())->getSExtValue();
814 assert(isPowerOf2_32(-Mask));
815
816 SDValue M = CurDAG->getTargetConstant(Mask, dl, MVT::i32);
817 SDNode *AA = CurDAG->getMachineNode(Hexagon::A2_andir, dl, MVT::i32,
818 N->getOperand(0), M);
819 ReplaceNode(N, AA);
820}
821
822// Handle these nodes here to avoid having to write patterns for all
823// combinations of input/output types. In all cases, the resulting
824// instruction is the same.
825void HexagonDAGToDAGISel::SelectTypecast(SDNode *N) {
826 SDValue Op = N->getOperand(0);
827 MVT OpTy = Op.getValueType().getSimpleVT();
828 SDNode *T = CurDAG->MorphNodeTo(N, N->getOpcode(),
829 CurDAG->getVTList(OpTy), {Op});
830 ReplaceNode(T, Op.getNode());
831}
832
833void HexagonDAGToDAGISel::SelectP2D(SDNode *N) {
834 MVT ResTy = N->getValueType(0).getSimpleVT();
835 SDNode *T = CurDAG->getMachineNode(Hexagon::C2_mask, SDLoc(N), ResTy,
836 N->getOperand(0));
837 ReplaceNode(N, T);
838}
839
840void HexagonDAGToDAGISel::SelectD2P(SDNode *N) {
841 const SDLoc &dl(N);
842 MVT ResTy = N->getValueType(0).getSimpleVT();
843 SDValue Zero = CurDAG->getTargetConstant(0, dl, MVT::i32);
844 SDNode *T = CurDAG->getMachineNode(Hexagon::A4_vcmpbgtui, dl, ResTy,
845 N->getOperand(0), Zero);
846 ReplaceNode(N, T);
847}
848
849void HexagonDAGToDAGISel::SelectV2Q(SDNode *N) {
850 const SDLoc &dl(N);
851 MVT ResTy = N->getValueType(0).getSimpleVT();
852
853 SDValue C = CurDAG->getTargetConstant(-1, dl, MVT::i32);
854 SDNode *R = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, C);
855 SDNode *T = CurDAG->getMachineNode(Hexagon::V6_vandvrt, dl, ResTy,
856 N->getOperand(0), SDValue(R,0));
857 ReplaceNode(N, T);
858}
859
860void HexagonDAGToDAGISel::SelectQ2V(SDNode *N) {
861 const SDLoc &dl(N);
862 MVT ResTy = N->getValueType(0).getSimpleVT();
863
864 SDValue C = CurDAG->getTargetConstant(-1, dl, MVT::i32);
865 SDNode *R = CurDAG->getMachineNode(Hexagon::A2_tfrsi, dl, MVT::i32, C);
866 SDNode *T = CurDAG->getMachineNode(Hexagon::V6_vandqrt, dl, ResTy,
867 N->getOperand(0), SDValue(R,0));
868 ReplaceNode(N, T);
869}
870
871void HexagonDAGToDAGISel::Select(SDNode *N) {
872 if (N->isMachineOpcode())
873 return N->setNodeId(-1); // Already selected.
874
875 switch (N->getOpcode()) {
876 case ISD::Constant: return SelectConstant(N);
877 case ISD::ConstantFP: return SelectConstantFP(N);
878 case ISD::FrameIndex: return SelectFrameIndex(N);
879 case ISD::SHL: return SelectSHL(N);
880 case ISD::LOAD: return SelectLoad(N);
881 case ISD::STORE: return SelectStore(N);
882 case ISD::INTRINSIC_W_CHAIN: return SelectIntrinsicWChain(N);
883 case ISD::INTRINSIC_WO_CHAIN: return SelectIntrinsicWOChain(N);
884
885 case HexagonISD::ADDC:
886 case HexagonISD::SUBC: return SelectAddSubCarry(N);
887 case HexagonISD::VALIGN: return SelectVAlign(N);
888 case HexagonISD::VALIGNADDR: return SelectVAlignAddr(N);
889 case HexagonISD::TYPECAST: return SelectTypecast(N);
890 case HexagonISD::P2D: return SelectP2D(N);
891 case HexagonISD::D2P: return SelectD2P(N);
892 case HexagonISD::Q2V: return SelectQ2V(N);
893 case HexagonISD::V2Q: return SelectV2Q(N);
894 }
895
896 if (HST->useHVXOps()) {
897 switch (N->getOpcode()) {
898 case ISD::VECTOR_SHUFFLE: return SelectHvxShuffle(N);
899 case HexagonISD::VROR: return SelectHvxRor(N);
900 }
901 }
902
903 SelectCode(N);
904}
905
906bool HexagonDAGToDAGISel::
907SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
908 std::vector<SDValue> &OutOps) {
909 SDValue Inp = Op, Res;
910
911 switch (ConstraintID) {
912 default:
913 return true;
914 case InlineAsm::Constraint_i:
915 case InlineAsm::Constraint_o: // Offsetable.
916 case InlineAsm::Constraint_v: // Not offsetable.
917 case InlineAsm::Constraint_m: // Memory.
918 if (SelectAddrFI(Inp, Res))
919 OutOps.push_back(Res);
920 else
921 OutOps.push_back(Inp);
922 break;
923 }
924
925 OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
926 return false;
927}
928
929
930static bool isMemOPCandidate(SDNode *I, SDNode *U) {
931 // I is an operand of U. Check if U is an arithmetic (binary) operation
932 // usable in a memop, where the other operand is a loaded value, and the
933 // result of U is stored in the same location.
934
935 if (!U->hasOneUse())
936 return false;
937 unsigned Opc = U->getOpcode();
938 switch (Opc) {
939 case ISD::ADD:
940 case ISD::SUB:
941 case ISD::AND:
942 case ISD::OR:
943 break;
944 default:
945 return false;
946 }
947
948 SDValue S0 = U->getOperand(0);
949 SDValue S1 = U->getOperand(1);
950 SDValue SY = (S0.getNode() == I) ? S1 : S0;
951
952 SDNode *UUse = *U->use_begin();
953 if (UUse->getNumValues() != 1)
954 return false;
955
956 // Check if one of the inputs to U is a load instruction and the output
957 // is used by a store instruction. If so and they also have the same
958 // base pointer, then don't preprocess this node sequence as it
959 // can be matched to a memop.
960 SDNode *SYNode = SY.getNode();
961 if (UUse->getOpcode() == ISD::STORE && SYNode->getOpcode() == ISD::LOAD) {
962 SDValue LDBasePtr = cast<MemSDNode>(SYNode)->getBasePtr();
963 SDValue STBasePtr = cast<MemSDNode>(UUse)->getBasePtr();
964 if (LDBasePtr == STBasePtr)
965 return true;
966 }
967 return false;
968}
969
970
971// Transform: (or (select c x 0) z) -> (select c (or x z) z)
972// (or (select c 0 y) z) -> (select c z (or y z))
973void HexagonDAGToDAGISel::ppSimplifyOrSelect0(std::vector<SDNode*> &&Nodes) {
974 SelectionDAG &DAG = *CurDAG;
975
976 for (auto I : Nodes) {
977 if (I->getOpcode() != ISD::OR)
978 continue;
979
980 auto IsZero = [] (const SDValue &V) -> bool {
981 if (ConstantSDNode *SC = dyn_cast<ConstantSDNode>(V.getNode()))
982 return SC->isNullValue();
983 return false;
984 };
985 auto IsSelect0 = [IsZero] (const SDValue &Op) -> bool {
986 if (Op.getOpcode() != ISD::SELECT)
987 return false;
988 return IsZero(Op.getOperand(1)) || IsZero(Op.getOperand(2));
989 };
990
991 SDValue N0 = I->getOperand(0), N1 = I->getOperand(1);
992 EVT VT = I->getValueType(0);
993 bool SelN0 = IsSelect0(N0);
994 SDValue SOp = SelN0 ? N0 : N1;
995 SDValue VOp = SelN0 ? N1 : N0;
996
997 if (SOp.getOpcode() == ISD::SELECT && SOp.getNode()->hasOneUse()) {
998 SDValue SC = SOp.getOperand(0);
999 SDValue SX = SOp.getOperand(1);
1000 SDValue SY = SOp.getOperand(2);
1001 SDLoc DLS = SOp;
1002 if (IsZero(SY)) {
1003 SDValue NewOr = DAG.getNode(ISD::OR, DLS, VT, SX, VOp);
1004 SDValue NewSel = DAG.getNode(ISD::SELECT, DLS, VT, SC, NewOr, VOp);
1005 DAG.ReplaceAllUsesWith(I, NewSel.getNode());
1006 } else if (IsZero(SX)) {
1007 SDValue NewOr = DAG.getNode(ISD::OR, DLS, VT, SY, VOp);
1008 SDValue NewSel = DAG.getNode(ISD::SELECT, DLS, VT, SC, VOp, NewOr);
1009 DAG.ReplaceAllUsesWith(I, NewSel.getNode());
1010 }
1011 }
1012 }
1013}
1014
1015// Transform: (store ch val (add x (add (shl y c) e)))
1016// to: (store ch val (add x (shl (add y d) c))),
1017// where e = (shl d c) for some integer d.
1018// The purpose of this is to enable generation of loads/stores with
1019// shifted addressing mode, i.e. mem(x+y<<#c). For that, the shift
1020// value c must be 0, 1 or 2.
1021void HexagonDAGToDAGISel::ppAddrReorderAddShl(std::vector<SDNode*> &&Nodes) {
1022 SelectionDAG &DAG = *CurDAG;
1023
1024 for (auto I : Nodes) {
1025 if (I->getOpcode() != ISD::STORE)
1026 continue;
1027
1028 // I matched: (store ch val Off)
1029 SDValue Off = I->getOperand(2);
1030 // Off needs to match: (add x (add (shl y c) (shl d c))))
1031 if (Off.getOpcode() != ISD::ADD)
1032 continue;
1033 // Off matched: (add x T0)
1034 SDValue T0 = Off.getOperand(1);
1035 // T0 needs to match: (add T1 T2):
1036 if (T0.getOpcode() != ISD::ADD)
1037 continue;
1038 // T0 matched: (add T1 T2)
1039 SDValue T1 = T0.getOperand(0);
1040 SDValue T2 = T0.getOperand(1);
1041 // T1 needs to match: (shl y c)
1042 if (T1.getOpcode() != ISD::SHL)
1043 continue;
1044 SDValue C = T1.getOperand(1);
1045 ConstantSDNode *CN = dyn_cast<ConstantSDNode>(C.getNode());
1046 if (CN == nullptr)
1047 continue;
1048 unsigned CV = CN->getZExtValue();
1049 if (CV > 2)
1050 continue;
1051 // T2 needs to match e, where e = (shl d c) for some d.
1052 ConstantSDNode *EN = dyn_cast<ConstantSDNode>(T2.getNode());
1053 if (EN == nullptr)
1054 continue;
1055 unsigned EV = EN->getZExtValue();
1056 if (EV % (1 << CV) != 0)
1057 continue;
1058 unsigned DV = EV / (1 << CV);
1059
1060 // Replace T0 with: (shl (add y d) c)
1061 SDLoc DL = SDLoc(I);
1062 EVT VT = T0.getValueType();
1063 SDValue D = DAG.getConstant(DV, DL, VT);
1064 // NewAdd = (add y d)
1065 SDValue NewAdd = DAG.getNode(ISD::ADD, DL, VT, T1.getOperand(0), D);
1066 // NewShl = (shl NewAdd c)
1067 SDValue NewShl = DAG.getNode(ISD::SHL, DL, VT, NewAdd, C);
1068 ReplaceNode(T0.getNode(), NewShl.getNode());
1069 }
1070}
1071
1072// Transform: (load ch (add x (and (srl y c) Mask)))
1073// to: (load ch (add x (shl (srl y d) d-c)))
1074// where
1075 //    Mask =  00..0 111..1 0.0
1076 //              |     |     +-- d-c 0s, and d-c is 0, 1 or 2.
1077 //              |     +-------- 1s
1078 //              +-------------- at most c 0s
1079// Motivating example:
1080// DAG combiner optimizes (add x (shl (srl y 5) 2))
1081// to (add x (and (srl y 3) 1FFFFFFC))
1082// which results in a constant-extended and(##...,lsr). This transformation
1083// undoes this simplification for cases where the shl can be folded into
1084// an addressing mode.
1085void HexagonDAGToDAGISel::ppAddrRewriteAndSrl(std::vector<SDNode*> &&Nodes) {
1086 SelectionDAG &DAG = *CurDAG;
1087
1088 for (SDNode *N : Nodes) {
1089 unsigned Opc = N->getOpcode();
1090 if (Opc != ISD::LOAD && Opc != ISD::STORE)
[2] Assuming 'Opc' is equal to LOAD
1091 continue;
1092 SDValue Addr = Opc == ISD::LOAD ? N->getOperand(1) : N->getOperand(2);
[3] '?' condition is true
1093 // Addr must match: (add x T0)
1094 if (Addr.getOpcode() != ISD::ADD)
[4] Assuming the condition is false
[5] Taking false branch
1095 continue;
1096 SDValue T0 = Addr.getOperand(1);
1097 // T0 must match: (and T1 Mask)
1098 if (T0.getOpcode() != ISD::AND)
[6] Assuming the condition is false
[7] Taking false branch
1099 continue;
1100
1101 // We have an AND.
1102 //
1103 // Check the first operand. It must be: (srl y c).
1104 SDValue S = T0.getOperand(0);
1105 if (S.getOpcode() != ISD::SRL)
[8] Assuming the condition is false
[9] Taking false branch
1106 continue;
1107 ConstantSDNode *SN = dyn_cast<ConstantSDNode>(S.getOperand(1).getNode());
1108 if (SN == nullptr)
[10] Taking false branch
1109 continue;
1110 if (SN->getAPIntValue().getBitWidth() != 32)
[11] Assuming the condition is false
[12] Taking false branch
1111 continue;
1112 uint32_t CV = SN->getZExtValue();
1113
1114 // Check the second operand: the supposed mask.
1115 ConstantSDNode *MN = dyn_cast<ConstantSDNode>(T0.getOperand(1).getNode());
1116 if (MN == nullptr)
[13] Taking false branch
1117 continue;
1118 if (MN->getAPIntValue().getBitWidth() != 32)
[14] Assuming the condition is false
[15] Taking false branch
1119 continue;
1120 uint32_t Mask = MN->getZExtValue();
1121 // Examine the mask.
1122 uint32_t TZ = countTrailingZeros(Mask);
[16] Calling 'countTrailingZeros<unsigned int>'
[23] Returning from 'countTrailingZeros<unsigned int>'
[24] 'TZ' initialized to 32
1123 uint32_t M1 = countTrailingOnes(Mask >> TZ);
[25] The result of the right shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t'
1124 uint32_t LZ = countLeadingZeros(Mask);
1125 // Trailing zeros + middle ones + leading zeros must equal the width.
1126 if (TZ + M1 + LZ != 32)
1127 continue;
1128 // The number of trailing zeros will be encoded in the addressing mode.
1129 if (TZ > 2)
1130 continue;
1131 // The number of leading zeros must be at most c.
1132 if (LZ > CV)
1133 continue;
1134
1135 // All looks good.
1136 SDValue Y = S.getOperand(0);
1137 EVT VT = Addr.getValueType();
1138 SDLoc dl(S);
1139 // TZ = D-C, so D = TZ+C.
1140 SDValue D = DAG.getConstant(TZ+CV, dl, VT);
1141 SDValue DC = DAG.getConstant(TZ, dl, VT);
1142 SDValue NewSrl = DAG.getNode(ISD::SRL, dl, VT, Y, D);
1143 SDValue NewShl = DAG.getNode(ISD::SHL, dl, VT, NewSrl, DC);
1144 ReplaceNode(T0.getNode(), NewShl.getNode());
1145 }
1146}
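
A worked instance of the mask check above, for reference; the concrete values come from the function's own motivating example, and the Mask == 0 case is the one the analyzer's path assumes:

    // Mask = 0x1FFFFFFC  (from (add x (and (srl y 3) 1FFFFFFC)), so c == 3)
    //   TZ = countTrailingZeros(Mask)      == 2
    //   M1 = countTrailingOnes(Mask >> TZ) == 27
    //   LZ = countLeadingZeros(Mask)       == 3
    //   TZ + M1 + LZ == 32, TZ <= 2, LZ <= c, so the rewrite applies with d = TZ + c = 5.
    // Mask = 0 (the value assumed at step [24]):
    //   TZ == 32, and 'Mask >> TZ' at line 1123 is the undefined shift reported above.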
1147
1148// Transform: (op ... (zext i1 c) ...) -> (select c (op ... 0 ...)
1149// (op ... 1 ...))
1150void HexagonDAGToDAGISel::ppHoistZextI1(std::vector<SDNode*> &&Nodes) {
1151 SelectionDAG &DAG = *CurDAG;
1152
1153 for (SDNode *N : Nodes) {
1154 unsigned Opc = N->getOpcode();
1155 if (Opc != ISD::ZERO_EXTEND)
1156 continue;
1157 SDValue OpI1 = N->getOperand(0);
1158 EVT OpVT = OpI1.getValueType();
1159 if (!OpVT.isSimple() || OpVT.getSimpleVT() != MVT::i1)
1160 continue;
1161 for (auto I = N->use_begin(), E = N->use_end(); I != E; ++I) {
1162 SDNode *U = *I;
1163 if (U->getNumValues() != 1)
1164 continue;
1165 EVT UVT = U->getValueType(0);
1166 if (!UVT.isSimple() || !UVT.isInteger() || UVT.getSimpleVT() == MVT::i1)
1167 continue;
1168 if (isMemOPCandidate(N, U))
1169 continue;
1170
1171 // Potentially simplifiable operation.
1172 unsigned I1N = I.getOperandNo();
1173 SmallVector<SDValue,2> Ops(U->getNumOperands());
1174 for (unsigned i = 0, n = U->getNumOperands(); i != n; ++i)
1175 Ops[i] = U->getOperand(i);
1176 EVT BVT = Ops[I1N].getValueType();
1177
1178 SDLoc dl(U);
1179 SDValue C0 = DAG.getConstant(0, dl, BVT);
1180 SDValue C1 = DAG.getConstant(1, dl, BVT);
1181 SDValue If0, If1;
1182
1183 if (isa<MachineSDNode>(U)) {
1184 unsigned UseOpc = U->getMachineOpcode();
1185 Ops[I1N] = C0;
1186 If0 = SDValue(DAG.getMachineNode(UseOpc, dl, UVT, Ops), 0);
1187 Ops[I1N] = C1;
1188 If1 = SDValue(DAG.getMachineNode(UseOpc, dl, UVT, Ops), 0);
1189 } else {
1190 unsigned UseOpc = U->getOpcode();
1191 Ops[I1N] = C0;
1192 If0 = DAG.getNode(UseOpc, dl, UVT, Ops);
1193 Ops[I1N] = C1;
1194 If1 = DAG.getNode(UseOpc, dl, UVT, Ops);
1195 }
1196 SDValue Sel = DAG.getNode(ISD::SELECT, dl, UVT, OpI1, If1, If0);
1197 DAG.ReplaceAllUsesWith(U, Sel.getNode());
1198 }
1199 }
1200}
1201
1202void HexagonDAGToDAGISel::PreprocessISelDAG() {
1203 // Repack all nodes before calling each preprocessing function,
1204 // because each of them can modify the set of nodes.
1205 auto getNodes = [this] () -> std::vector<SDNode*> {
1206 std::vector<SDNode*> T;
1207 T.reserve(CurDAG->allnodes_size());
1208 for (SDNode &N : CurDAG->allnodes())
1209 T.push_back(&N);
1210 return T;
1211 };
1212
1213 // Transform: (or (select c x 0) z) -> (select c (or x z) z)
1214 // (or (select c 0 y) z) -> (select c z (or y z))
1215 ppSimplifyOrSelect0(getNodes());
1216
1217 // Transform: (store ch val (add x (add (shl y c) e)))
1218 // to: (store ch val (add x (shl (add y d) c))),
1219 // where e = (shl d c) for some integer d.
1220 // The purpose of this is to enable generation of loads/stores with
1221 // shifted addressing mode, i.e. mem(x+y<<#c). For that, the shift
1222 // value c must be 0, 1 or 2.
1223 ppAddrReorderAddShl(getNodes());
1224
1225 // Transform: (load ch (add x (and (srl y c) Mask)))
1226 // to: (load ch (add x (shl (srl y d) d-c)))
1227 // where
1228 //    Mask =  00..0 111..1 0.0
1229 //              |     |     +-- d-c 0s, and d-c is 0, 1 or 2.
1230 //              |     +-------- 1s
1231 //              +-------------- at most c 0s
1232 // Motivating example:
1233 // DAG combiner optimizes (add x (shl (srl y 5) 2))
1234 // to (add x (and (srl y 3) 1FFFFFFC))
1235 // which results in a constant-extended and(##...,lsr). This transformation
1236 // undoes this simplification for cases where the shl can be folded into
1237 // an addressing mode.
1238 ppAddrRewriteAndSrl(getNodes());
[1] Calling 'HexagonDAGToDAGISel::ppAddrRewriteAndSrl'
1239
1240 // Transform: (op ... (zext i1 c) ...) -> (select c (op ... 0 ...)
1241 // (op ... 1 ...))
1242 ppHoistZextI1(getNodes());
1243
1244 DEBUG_WITH_TYPE("isel", {
1245 dbgs() << "Preprocessed (Hexagon) selection DAG:";
1246 CurDAG->dump();
1247 });
1248
1249 if (EnableAddressRebalancing) {
1250 rebalanceAddressTrees();
1251
1252 DEBUG_WITH_TYPE("isel", {
1253 dbgs() << "Address tree balanced selection DAG:";
1254 CurDAG->dump();
1255 });
1256 }
1257}
1258
1259void HexagonDAGToDAGISel::EmitFunctionEntryCode() {
1260 auto &HST = static_cast<const HexagonSubtarget&>(MF->getSubtarget());
1261 auto &HFI = *HST.getFrameLowering();
1262 if (!HFI.needsAligna(*MF))
1263 return;
1264
1265 MachineFrameInfo &MFI = MF->getFrameInfo();
1266 MachineBasicBlock *EntryBB = &MF->front();
1267 unsigned AR = FuncInfo->CreateReg(MVT::i32);
1268 unsigned MaxA = MFI.getMaxAlignment();
1269 BuildMI(EntryBB, DebugLoc(), HII->get(Hexagon::PS_aligna), AR)
1270 .addImm(MaxA);
1271 MF->getInfo<HexagonMachineFunctionInfo>()->setStackAlignBaseVReg(AR);
1272}
1273
1274// Match a frame index that can be used in an addressing mode.
1275bool HexagonDAGToDAGISel::SelectAddrFI(SDValue &N, SDValue &R) {
1276 if (N.getOpcode() != ISD::FrameIndex)
1277 return false;
1278 auto &HFI = *HST->getFrameLowering();
1279 MachineFrameInfo &MFI = MF->getFrameInfo();
1280 int FX = cast<FrameIndexSDNode>(N)->getIndex();
1281 if (!MFI.isFixedObjectIndex(FX) && HFI.needsAligna(*MF))
1282 return false;
1283 R = CurDAG->getTargetFrameIndex(FX, MVT::i32);
1284 return true;
1285}
1286
1287inline bool HexagonDAGToDAGISel::SelectAddrGA(SDValue &N, SDValue &R) {
1288 return SelectGlobalAddress(N, R, false, 0);
1289}
1290
1291inline bool HexagonDAGToDAGISel::SelectAddrGP(SDValue &N, SDValue &R) {
1292 return SelectGlobalAddress(N, R, true, 0);
1293}
1294
1295inline bool HexagonDAGToDAGISel::SelectAnyImm(SDValue &N, SDValue &R) {
1296 return SelectAnyImmediate(N, R, 0);
1297}
1298
1299inline bool HexagonDAGToDAGISel::SelectAnyImm0(SDValue &N, SDValue &R) {
1300 return SelectAnyImmediate(N, R, 0);
1301}
1302inline bool HexagonDAGToDAGISel::SelectAnyImm1(SDValue &N, SDValue &R) {
1303 return SelectAnyImmediate(N, R, 1);
1304}
1305inline bool HexagonDAGToDAGISel::SelectAnyImm2(SDValue &N, SDValue &R) {
1306 return SelectAnyImmediate(N, R, 2);
1307}
1308inline bool HexagonDAGToDAGISel::SelectAnyImm3(SDValue &N, SDValue &R) {
1309 return SelectAnyImmediate(N, R, 3);
1310}
1311
1312inline bool HexagonDAGToDAGISel::SelectAnyInt(SDValue &N, SDValue &R) {
1313 EVT T = N.getValueType();
1314 if (!T.isInteger() || T.getSizeInBits() != 32 || !isa<ConstantSDNode>(N))
1315 return false;
1316 R = N;
1317 return true;
1318}
1319
1320bool HexagonDAGToDAGISel::SelectAnyImmediate(SDValue &N, SDValue &R,
1321 uint32_t LogAlign) {
1322 auto IsAligned = [LogAlign] (uint64_t V) -> bool {
1323 return alignTo(V, (uint64_t)1 << LogAlign) == V;
1324 };
1325
1326 switch (N.getOpcode()) {
1327 case ISD::Constant: {
1328 if (N.getValueType() != MVT::i32)
1329 return false;
1330 int32_t V = cast<const ConstantSDNode>(N)->getZExtValue();
1331 if (!IsAligned(V))
1332 return false;
1333 R = CurDAG->getTargetConstant(V, SDLoc(N), N.getValueType());
1334 return true;
1335 }
1336 case HexagonISD::JT:
1337 case HexagonISD::CP:
1338 // These are assumed to always be aligned at least 8-byte boundary.
1339 if (LogAlign > 3)
1340 return false;
1341 R = N.getOperand(0);
1342 return true;
1343 case ISD::ExternalSymbol:
1344 // Symbols may be aligned at any boundary.
1345 if (LogAlign > 0)
1346 return false;
1347 R = N;
1348 return true;
1349 case ISD::BlockAddress:
1350 // Block address is always aligned at least 4-byte boundary.
1351 if (LogAlign > 2 || !IsAligned(cast<BlockAddressSDNode>(N)->getOffset()))
1352 return false;
1353 R = N;
1354 return true;
1355 }
1356
1357 if (SelectGlobalAddress(N, R, false, LogAlign) ||
1358 SelectGlobalAddress(N, R, true, LogAlign))
1359 return true;
1360
1361 return false;
1362}
1363
1364bool HexagonDAGToDAGISel::SelectGlobalAddress(SDValue &N, SDValue &R,
1365 bool UseGP, uint32_t LogAlign) {
1366 auto IsAligned = [LogAlign] (uint64_t V) -> bool {
1367 return alignTo(V, (uint64_t)1 << LogAlign) == V;
1368 };
1369
1370 switch (N.getOpcode()) {
1371 case ISD::ADD: {
1372 SDValue N0 = N.getOperand(0);
1373 SDValue N1 = N.getOperand(1);
1374 unsigned GAOpc = N0.getOpcode();
1375 if (UseGP && GAOpc != HexagonISD::CONST32_GP)
1376 return false;
1377 if (!UseGP && GAOpc != HexagonISD::CONST32)
1378 return false;
1379 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N1)) {
1380 SDValue Addr = N0.getOperand(0);
1381 // For the purpose of alignment, sextvalue and zextvalue are the same.
1382 if (!IsAligned(Const->getZExtValue()))
1383 return false;
1384 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Addr)) {
1385 if (GA->getOpcode() == ISD::TargetGlobalAddress) {
1386 uint64_t NewOff = GA->getOffset() + (uint64_t)Const->getSExtValue();
1387 R = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(Const),
1388 N.getValueType(), NewOff);
1389 return true;
1390 }
1391 }
1392 }
1393 break;
1394 }
1395 case HexagonISD::CP:
1396 case HexagonISD::JT:
1397 case HexagonISD::CONST32:
1398 // The operand(0) of CONST32 is TargetGlobalAddress, which is what we
1399 // want in the instruction.
1400 if (!UseGP)
1401 R = N.getOperand(0);
1402 return !UseGP;
1403 case HexagonISD::CONST32_GP:
1404 if (UseGP)
1405 R = N.getOperand(0);
1406 return UseGP;
1407 default:
1408 return false;
1409 }
1410
1411 return false;
1412}
1413
1414bool HexagonDAGToDAGISel::DetectUseSxtw(SDValue &N, SDValue &R) {
1415 // This (complex pattern) function is meant to detect a sign-extension
1416 // i32->i64 on a per-operand basis. This would allow writing single
1417 // patterns that would cover a number of combinations of different ways
1418 // a sign-extension could be written. For example:
1419 // (mul (DetectUseSxtw x) (DetectUseSxtw y)) -> (M2_dpmpyss_s0 x y)
1420 // could match either one of these:
1421 // (mul (sext x) (sext_inreg y))
1422 // (mul (sext-load *p) (sext_inreg y))
1423 // (mul (sext_inreg x) (sext y))
1424 // etc.
1425 //
1426 // The returned value will have type i64 and its low word will
1427 // contain the value being extended. The high bits are not specified.
1428 // The returned type is i64 because the original type of N was i64,
1429 // but the users of this function should only use the low-word of the
1430 // result, e.g.
1431 // (mul sxtw:x, sxtw:y) -> (M2_dpmpyss_s0 (LoReg sxtw:x), (LoReg sxtw:y))
1432
1433 if (N.getValueType() != MVT::i64)
1434 return false;
1435 unsigned Opc = N.getOpcode();
1436 switch (Opc) {
1437 case ISD::SIGN_EXTEND:
1438 case ISD::SIGN_EXTEND_INREG: {
1439 // sext_inreg has the source type as a separate operand.
1440 EVT T = Opc == ISD::SIGN_EXTEND
1441 ? N.getOperand(0).getValueType()
1442 : cast<VTSDNode>(N.getOperand(1))->getVT();
1443 unsigned SW = T.getSizeInBits();
1444 if (SW == 32)
1445 R = N.getOperand(0);
1446 else if (SW < 32)
1447 R = N;
1448 else
1449 return false;
1450 break;
1451 }
1452 case ISD::LOAD: {
1453 LoadSDNode *L = cast<LoadSDNode>(N);
1454 if (L->getExtensionType() != ISD::SEXTLOAD)
1455 return false;
1456 // All extending loads extend to i32, so even if the value in
1457 // memory is shorter than 32 bits, it will be i32 after the load.
1458 if (L->getMemoryVT().getSizeInBits() > 32)
1459 return false;
1460 R = N;
1461 break;
1462 }
1463 case ISD::SRA: {
1464 auto *S = dyn_cast<ConstantSDNode>(N.getOperand(1));
1465 if (!S || S->getZExtValue() != 32)
1466 return false;
1467 R = N;
1468 break;
1469 }
1470 default:
1471 return false;
1472 }
1473 EVT RT = R.getValueType();
1474 if (RT == MVT::i64)
1475 return true;
1476 assert(RT == MVT::i32);
1477 // This is only to produce a value of type i64. Do not rely on the
1478 // high bits produced by this.
1479 const SDLoc &dl(N);
1480 SDValue Ops[] = {
1481 CurDAG->getTargetConstant(Hexagon::DoubleRegsRegClassID, dl, MVT::i32),
1482 R, CurDAG->getTargetConstant(Hexagon::isub_hi, dl, MVT::i32),
1483 R, CurDAG->getTargetConstant(Hexagon::isub_lo, dl, MVT::i32)
1484 };
1485 SDNode *T = CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl,
1486 MVT::i64, Ops);
1487 R = SDValue(T, 0);
1488 return true;
1489}
1490
1491bool HexagonDAGToDAGISel::keepsLowBits(const SDValue &Val, unsigned NumBits,
1492 SDValue &Src) {
1493 unsigned Opc = Val.getOpcode();
1494 switch (Opc) {
1495 case ISD::SIGN_EXTEND:
1496 case ISD::ZERO_EXTEND:
1497 case ISD::ANY_EXTEND: {
1498 const SDValue &Op0 = Val.getOperand(0);
1499 EVT T = Op0.getValueType();
1500 if (T.isInteger() && T.getSizeInBits() == NumBits) {
1501 Src = Op0;
1502 return true;
1503 }
1504 break;
1505 }
1506 case ISD::SIGN_EXTEND_INREG:
1507 case ISD::AssertSext:
1508 case ISD::AssertZext:
1509 if (Val.getOperand(0).getValueType().isInteger()) {
1510 VTSDNode *T = cast<VTSDNode>(Val.getOperand(1));
1511 if (T->getVT().getSizeInBits() == NumBits) {
1512 Src = Val.getOperand(0);
1513 return true;
1514 }
1515 }
1516 break;
1517 case ISD::AND: {
1518 // Check if this is an AND with NumBits of lower bits set to 1.
1519 uint64_t Mask = (1 << NumBits) - 1;
1520 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
1521 if (C->getZExtValue() == Mask) {
1522 Src = Val.getOperand(1);
1523 return true;
1524 }
1525 }
1526 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
1527 if (C->getZExtValue() == Mask) {
1528 Src = Val.getOperand(0);
1529 return true;
1530 }
1531 }
1532 break;
1533 }
1534 case ISD::OR:
1535 case ISD::XOR: {
1536 // OR/XOR with the lower NumBits bits set to 0.
1537 uint64_t Mask = (1 << NumBits) - 1;
1538 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
1539 if ((C->getZExtValue() & Mask) == 0) {
1540 Src = Val.getOperand(1);
1541 return true;
1542 }
1543 }
1544 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
1545 if ((C->getZExtValue() & Mask) == 0) {
1546 Src = Val.getOperand(0);
1547 return true;
1548 }
1549 }
1550 }
1551 default:
1552 break;
1553 }
1554 return false;
1555}
1556
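// [Editor's sketch -- not part of the original file] The mask used by
// keepsLowBits above: AND with the low-NumBits mask preserves exactly those
// bits, while an OR/XOR constant with those bits clear leaves them unchanged.
// For NumBits >= 32 a 64-bit-safe spelling is (UINT64_C(1) << NumBits) - 1;
// the hypothetical helper below assumes NumBits < 64.
inline uint64_t lowBitsMaskSketch(unsigned NumBits) {
  return ((uint64_t)1 << NumBits) - 1; // NumBits=8 -> 0xFF, NumBits=16 -> 0xFFFF
}
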
1557bool HexagonDAGToDAGISel::isAlignedMemNode(const MemSDNode *N) const {
1558 return N->getAlignment() >= N->getMemoryVT().getStoreSize();
1559}
1560
1561bool HexagonDAGToDAGISel::isSmallStackStore(const StoreSDNode *N) const {
1562 unsigned StackSize = MF->getFrameInfo().estimateStackSize(*MF);
1563 switch (N->getMemoryVT().getStoreSize()) {
1564 case 1:
1565 return StackSize <= 56; // 1*2^6 - 8
1566 case 2:
1567 return StackSize <= 120; // 2*2^6 - 8
1568 case 4:
1569 return StackSize <= 248; // 4*2^6 - 8
1570 default:
1571 return false;
1572 }
1573}
1574
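// [Editor's sketch -- not part of the original file] The thresholds in
// isSmallStackStore follow a single formula, StoreSize * 2^6 - 8; the helper
// name below is hypothetical.
inline unsigned smallStackLimitSketch(unsigned StoreSize) {
  return StoreSize * 64 - 8; // 1 -> 56, 2 -> 120, 4 -> 248
}
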
1575// Return true when the given node fits in a positive half word.
1576bool HexagonDAGToDAGISel::isPositiveHalfWord(const SDNode *N) const {
1577 if (const ConstantSDNode *CN = dyn_cast<const ConstantSDNode>(N)) {
1578 int64_t V = CN->getSExtValue();
1579 return V > 0 && isInt<16>(V);
1580 }
1581 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
1582 const VTSDNode *VN = dyn_cast<const VTSDNode>(N->getOperand(1));
1583 return VN->getVT().getSizeInBits() <= 16;
1584 }
1585 return false;
1586}
1587
1588bool HexagonDAGToDAGISel::hasOneUse(const SDNode *N) const {
1589 return !CheckSingleUse || N->hasOneUse();
1590}
1591
1592////////////////////////////////////////////////////////////////////////////////
1593// Rebalancing of address calculation trees
1594
1595static bool isOpcodeHandled(const SDNode *N) {
1596 switch (N->getOpcode()) {
1597 case ISD::ADD:
1598 case ISD::MUL:
1599 return true;
1600 case ISD::SHL:
1601 // We only handle constant shifts because these can be easily flattened
1602 // into multiplications by 2^Op1.
1603 return isa<ConstantSDNode>(N->getOperand(1).getNode());
1604 default:
1605 return false;
1606 }
1607}
1608
1609/// Return the weight of an SDNode
1610int HexagonDAGToDAGISel::getWeight(SDNode *N) {
1611 if (!isOpcodeHandled(N))
1612 return 1;
1613 assert(RootWeights.count(N) && "Cannot get weight of unseen root!");
1614 assert(RootWeights[N] != -1 && "Cannot get weight of unvisited root!");
1615 assert(RootWeights[N] != -2 && "Cannot get weight of RAWU'd root!");
1616 return RootWeights[N];
1617}
1618
1619int HexagonDAGToDAGISel::getHeight(SDNode *N) {
1620 if (!isOpcodeHandled(N))
1621 return 0;
1622 assert(RootWeights.count(N) && RootWeights[N] >= 0 &&
1623 "Cannot query height of unvisited/RAUW'd node!");
1624 return RootHeights[N];
1625}
1626
1627namespace {
1628struct WeightedLeaf {
1629 SDValue Value;
1630 int Weight;
1631 int InsertionOrder;
1632
1633 WeightedLeaf() : Value(SDValue()) { }
1634
1635 WeightedLeaf(SDValue Value, int Weight, int InsertionOrder) :
1636 Value(Value), Weight(Weight), InsertionOrder(InsertionOrder) {
1637 assert(Weight >= 0 && "Weight must be >= 0");
1638 }
1639
1640 static bool Compare(const WeightedLeaf &A, const WeightedLeaf &B) {
1641 assert(A.Value.getNode() && B.Value.getNode());
1642 return A.Weight == B.Weight ?
1643 (A.InsertionOrder > B.InsertionOrder) :
1644 (A.Weight > B.Weight);
1645 }
1646};
1647
1648/// A specialized priority queue for WeightedLeaves. It automatically folds
1649/// constants and allows removal of non-top elements while maintaining the
1650/// priority order.
1651class LeafPrioQueue {
1652 SmallVector<WeightedLeaf, 8> Q;
1653 bool HaveConst;
1654 WeightedLeaf ConstElt;
1655 unsigned Opcode;
1656
1657public:
1658 bool empty() {
1659 return (!HaveConst && Q.empty());
1660 }
1661
1662 size_t size() {
1663 return Q.size() + HaveConst;
1664 }
1665
1666 bool hasConst() {
1667 return HaveConst;
1668 }
1669
1670 const WeightedLeaf &top() {
1671 if (HaveConst)
1672 return ConstElt;
1673 return Q.front();
1674 }
1675
1676 WeightedLeaf pop() {
1677 if (HaveConst) {
1678 HaveConst = false;
1679 return ConstElt;
1680 }
1681 std::pop_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1682 return Q.pop_back_val();
1683 }
1684
1685 void push(WeightedLeaf L, bool SeparateConst=true) {
1686 if (!HaveConst && SeparateConst && isa<ConstantSDNode>(L.Value)) {
1687 if (Opcode == ISD::MUL &&
1688 cast<ConstantSDNode>(L.Value)->getSExtValue() == 1)
1689 return;
1690 if (Opcode == ISD::ADD &&
1691 cast<ConstantSDNode>(L.Value)->getSExtValue() == 0)
1692 return;
1693
1694 HaveConst = true;
1695 ConstElt = L;
1696 } else {
1697 Q.push_back(L);
1698 std::push_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1699 }
1700 }
1701
1702 /// Push L to the bottom of the queue regardless of its weight. If L is
1703 /// constant, it will not be folded with other constants in the queue.
1704 void pushToBottom(WeightedLeaf L) {
1705 L.Weight = 1000;
1706 push(L, false);
1707 }
1708
1709 /// Search for a SHL(x, [<=MaxAmount]) subtree in the queue, return the one of
1710 /// lowest weight and remove it from the queue.
1711 WeightedLeaf findSHL(uint64_t MaxAmount);
1712
1713 WeightedLeaf findMULbyConst();
1714
1715 LeafPrioQueue(unsigned Opcode) :
1716 HaveConst(false), Opcode(Opcode) { }
1717};
1718} // end anonymous namespace
1719
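// [Editor's sketch -- not part of the original file] std::push_heap/pop_heap
// build a max-heap with respect to the comparator, so WeightedLeaf::Compare's
// use of '>' makes LeafPrioQueue::pop() return the smallest weight first
// (ties broken by earliest insertion order). A standalone illustration with
// plain ints (needs <algorithm>, <functional> and <vector>):
#if 0
static int popSmallestSketch(std::vector<int> &Heap) {
  // Heap was built with std::make_heap(Heap.begin(), Heap.end(), std::greater<int>()).
  std::pop_heap(Heap.begin(), Heap.end(), std::greater<int>());
  int Min = Heap.back();
  Heap.pop_back();
  return Min;
}
#endif
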
1720WeightedLeaf LeafPrioQueue::findSHL(uint64_t MaxAmount) {
1721 int ResultPos;
1722 WeightedLeaf Result;
1723
1724 for (int Pos = 0, End = Q.size(); Pos != End; ++Pos) {
1725 const WeightedLeaf &L = Q[Pos];
1726 const SDValue &Val = L.Value;
1727 if (Val.getOpcode() != ISD::SHL ||
1728 !isa<ConstantSDNode>(Val.getOperand(1)) ||
1729 Val.getConstantOperandVal(1) > MaxAmount)
1730 continue;
1731 if (!Result.Value.getNode() || Result.Weight > L.Weight ||
1732 (Result.Weight == L.Weight && Result.InsertionOrder > L.InsertionOrder))
1733 {
1734 Result = L;
1735 ResultPos = Pos;
1736 }
1737 }
1738
1739 if (Result.Value.getNode()) {
1740 Q.erase(&Q[ResultPos]);
1741 std::make_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1742 }
1743
1744 return Result;
1745}
1746
1747WeightedLeaf LeafPrioQueue::findMULbyConst() {
1748 int ResultPos;
1749 WeightedLeaf Result;
1750
1751 for (int Pos = 0, End = Q.size(); Pos != End; ++Pos) {
1752 const WeightedLeaf &L = Q[Pos];
1753 const SDValue &Val = L.Value;
1754 if (Val.getOpcode() != ISD::MUL ||
1755 !isa<ConstantSDNode>(Val.getOperand(1)) ||
1756 Val.getConstantOperandVal(1) > 127)
1757 continue;
1758 if (!Result.Value.getNode() || Result.Weight > L.Weight ||
1759 (Result.Weight == L.Weight && Result.InsertionOrder > L.InsertionOrder))
1760 {
1761 Result = L;
1762 ResultPos = Pos;
1763 }
1764 }
1765
1766 if (Result.Value.getNode()) {
1767 Q.erase(&Q[ResultPos]);
1768 std::make_heap(Q.begin(), Q.end(), WeightedLeaf::Compare);
1769 }
1770
1771 return Result;
1772}
1773
1774SDValue HexagonDAGToDAGISel::getMultiplierForSHL(SDNode *N) {
1775 uint64_t MulFactor = 1ull << N->getConstantOperandVal(1);
1776 return CurDAG->getConstant(MulFactor, SDLoc(N),
1777 N->getOperand(1).getValueType());
1778}
1779
1780/// @returns the value x for which 2^x is a factor of Val
1781static unsigned getPowerOf2Factor(SDValue Val) {
1782 if (Val.getOpcode() == ISD::MUL) {
1783 unsigned MaxFactor = 0;
1784 for (int i = 0; i < 2; ++i) {
1785 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(i));
1786 if (!C)
1787 continue;
1788 const APInt &CInt = C->getAPIntValue();
1789 if (CInt.getBoolValue())
1790 MaxFactor = CInt.countTrailingZeros();
1791 }
1792 return MaxFactor;
1793 }
1794 if (Val.getOpcode() == ISD::SHL) {
1795 if (!isa<ConstantSDNode>(Val.getOperand(1).getNode()))
1796 return 0;
1797 return (unsigned) Val.getConstantOperandVal(1);
1798 }
1799
1800 return 0;
1801}
1802
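// [Editor's example -- not part of the original file] For a MUL by the
// constant 12 (binary 1100), the power-of-two factor found above is 2^2; for a
// SHL by k it is 2^k.
static_assert((12 % (1 << 2)) == 0 && (12 % (1 << 3)) != 0,
              "2^2 is the largest power of two dividing 12");
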
1803/// @returns true if V>>Amount will eliminate V's operation on its child
1804static bool willShiftRightEliminate(SDValue V, unsigned Amount) {
1805 if (V.getOpcode() == ISD::MUL) {
1806 SDValue Ops[] = { V.getOperand(0), V.getOperand(1) };
1807 for (int i = 0; i < 2; ++i)
1808 if (isa<ConstantSDNode>(Ops[i].getNode()) &&
1809 V.getConstantOperandVal(i) % (1ULL << Amount) == 0) {
1810 uint64_t NewConst = V.getConstantOperandVal(i) >> Amount;
1811 return (NewConst == 1);
1812 }
1813 } else if (V.getOpcode() == ISD::SHL) {
1814 return (Amount == V.getConstantOperandVal(1));
1815 }
1816
1817 return false;
1818}
1819
1820SDValue HexagonDAGToDAGISel::factorOutPowerOf2(SDValue V, unsigned Power) {
1821 SDValue Ops[] = { V.getOperand(0), V.getOperand(1) };
1822 if (V.getOpcode() == ISD::MUL) {
1823 for (int i=0; i < 2; ++i) {
1824 if (isa<ConstantSDNode>(Ops[i].getNode()) &&
1825 V.getConstantOperandVal(i) % ((uint64_t)1 << Power) == 0) {
1826 uint64_t NewConst = V.getConstantOperandVal(i) >> Power;
1827 if (NewConst == 1)
1828 return Ops[!i];
1829 Ops[i] = CurDAG->getConstant(NewConst,
1830 SDLoc(V), V.getValueType());
1831 break;
1832 }
1833 }
1834 } else if (V.getOpcode() == ISD::SHL) {
1835 uint64_t ShiftAmount = V.getConstantOperandVal(1);
1836 if (ShiftAmount == Power)
1837 return Ops[0];
1838 Ops[1] = CurDAG->getConstant(ShiftAmount - Power,
1839 SDLoc(V), V.getValueType());
1840 }
1841
1842 return CurDAG->getNode(V.getOpcode(), SDLoc(V), V.getValueType(), Ops);
1843}
1844
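// [Editor's example -- not part of the original file] Worked instance of the
// factorization above and of the later rewrite
// MUL(x, C * 2^Y) + SHL(z, Y) -> SHL(ADD(MUL(x, C), z), Y), with C=3, Y=2,
// x=5, z=7:
static_assert(5 * 12 + (7 << 2) == ((5 * 3 + 7) << 2),
              "x*12 + (z << 2) == ((x*3 + z) << 2)");
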
1845static bool isTargetConstant(const SDValue &V) {
1846 return V.getOpcode() == HexagonISD::CONST32 ||
1847 V.getOpcode() == HexagonISD::CONST32_GP;
1848}
1849
1850unsigned HexagonDAGToDAGISel::getUsesInFunction(const Value *V) {
1851 if (GAUsesInFunction.count(V))
1852 return GAUsesInFunction[V];
1853
1854 unsigned Result = 0;
1855 const Function &CurF = CurDAG->getMachineFunction().getFunction();
1856 for (const User *U : V->users()) {
1857 if (isa<Instruction>(U) &&
1858 cast<Instruction>(U)->getParent()->getParent() == &CurF)
1859 ++Result;
1860 }
1861
1862 GAUsesInFunction[V] = Result;
1863
1864 return Result;
1865}
1866
1867/// Note - After calling this, N may be dead. It may have been replaced by a
1868/// new node, so always use the returned value in place of N.
1869///
1870/// @returns The SDValue taking the place of N (which could be N if it is
1871/// unchanged)
1872SDValue HexagonDAGToDAGISel::balanceSubTree(SDNode *N, bool TopLevel) {
1873 assert(RootWeights.count(N) && "Cannot balance non-root node.");
1874 assert(RootWeights[N] != -2 && "This node was RAUW'd!");
1875 assert(!TopLevel || N->getOpcode() == ISD::ADD);
1876
1877 // Return early if this node was already visited
1878 if (RootWeights[N] != -1)
1879 return SDValue(N, 0);
1880
1881 assert(isOpcodeHandled(N));
1882
1883 SDValue Op0 = N->getOperand(0);
1884 SDValue Op1 = N->getOperand(1);
1885
1886 // Return early if the operands will remain unchanged or are all roots
1887 if ((!isOpcodeHandled(Op0.getNode()) || RootWeights.count(Op0.getNode())) &&
1888 (!isOpcodeHandled(Op1.getNode()) || RootWeights.count(Op1.getNode()))) {
1889 SDNode *Op0N = Op0.getNode();
1890 int Weight;
1891 if (isOpcodeHandled(Op0N) && RootWeights[Op0N] == -1) {
1892 Weight = getWeight(balanceSubTree(Op0N).getNode());
1893 // Weight = calculateWeight(Op0N);
1894 } else
1895 Weight = getWeight(Op0N);
1896
1897 SDNode *Op1N = N->getOperand(1).getNode(); // Op1 may have been RAUWd
1898 if (isOpcodeHandled(Op1N) && RootWeights[Op1N] == -1) {
1899 Weight += getWeight(balanceSubTree(Op1N).getNode());
1900 // Weight += calculateWeight(Op1N);
1901 } else
1902 Weight += getWeight(Op1N);
1903
1904 RootWeights[N] = Weight;
1905 RootHeights[N] = std::max(getHeight(N->getOperand(0).getNode()),
1906 getHeight(N->getOperand(1).getNode())) + 1;
1907
1908 LLVM_DEBUG(dbgs() << "--> No need to balance root (Weight=" << Weight
1909 << " Height=" << RootHeights[N] << "): ");
1910 LLVM_DEBUG(N->dump(CurDAG));
1911
1912 return SDValue(N, 0);
1913 }
1914
1915 LLVM_DEBUG(dbgs() << "** Balancing root node: ");
1916 LLVM_DEBUG(N->dump(CurDAG));
1917
1918 unsigned NOpcode = N->getOpcode();
1919
1920 LeafPrioQueue Leaves(NOpcode);
1921 SmallVector<SDValue, 4> Worklist;
1922 Worklist.push_back(SDValue(N, 0));
1923
1924 // SHL nodes will be converted to MUL nodes
1925 if (NOpcode == ISD::SHL)
1926 NOpcode = ISD::MUL;
1927
1928 bool CanFactorize = false;
1929 WeightedLeaf Mul1, Mul2;
1930 unsigned MaxPowerOf2 = 0;
1931 WeightedLeaf GA;
1932
1933 // Do not try to factor out a shift if there is already a shift at the tip of
1934 // the tree.
1935 bool HaveTopLevelShift = false;
1936 if (TopLevel &&
1937 ((isOpcodeHandled(Op0.getNode()) && Op0.getOpcode() == ISD::SHL &&
1938 Op0.getConstantOperandVal(1) < 4) ||
1939 (isOpcodeHandled(Op1.getNode()) && Op1.getOpcode() == ISD::SHL &&
1940 Op1.getConstantOperandVal(1) < 4)))
1941 HaveTopLevelShift = true;
1942
1943 // Flatten the subtree into an ordered list of leaves; at the same time
1944 // determine whether the tree is already balanced.
1945 int InsertionOrder = 0;
1946 SmallDenseMap<SDValue, int> NodeHeights;
1947 bool Imbalanced = false;
1948 int CurrentWeight = 0;
1949 while (!Worklist.empty()) {
1950 SDValue Child = Worklist.pop_back_val();
1951
1952 if (Child.getNode() != N && RootWeights.count(Child.getNode())) {
1953 // CASE 1: Child is a root node
1954
1955 int Weight = RootWeights[Child.getNode()];
1956 if (Weight == -1) {
1957 Child = balanceSubTree(Child.getNode());
1958 // calculateWeight(Child.getNode());
1959 Weight = getWeight(Child.getNode());
1960 } else if (Weight == -2) {
1961 // Whoops, this node was RAUWd by one of the balanceSubTree calls we
1962 // made. Our worklist isn't up to date anymore.
1963 // Restart the whole process.
1964 LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
1965 return balanceSubTree(N, TopLevel);
1966 }
1967
1968 NodeHeights[Child] = 1;
1969 CurrentWeight += Weight;
1970
1971 unsigned PowerOf2;
1972 if (TopLevel && !CanFactorize && !HaveTopLevelShift &&
1973 (Child.getOpcode() == ISD::MUL || Child.getOpcode() == ISD::SHL) &&
1974 Child.hasOneUse() && (PowerOf2 = getPowerOf2Factor(Child))) {
1975 // Try to identify two factorizable MUL/SHL children greedily. Leave
1976 // them out of the priority queue for now so we can deal with them
1977 // after.
1978 if (!Mul1.Value.getNode()) {
1979 Mul1 = WeightedLeaf(Child, Weight, InsertionOrder++);
1980 MaxPowerOf2 = PowerOf2;
1981 } else {
1982 Mul2 = WeightedLeaf(Child, Weight, InsertionOrder++);
1983 MaxPowerOf2 = std::min(MaxPowerOf2, PowerOf2);
1984
1985 // Our addressing modes can only shift by a maximum of 3
1986 if (MaxPowerOf2 > 3)
1987 MaxPowerOf2 = 3;
1988
1989 CanFactorize = true;
1990 }
1991 } else
1992 Leaves.push(WeightedLeaf(Child, Weight, InsertionOrder++));
1993 } else if (!isOpcodeHandled(Child.getNode())) {
1994 // CASE 2: Child is an unhandled kind of node (e.g. constant)
1995 int Weight = getWeight(Child.getNode());
1996
1997 NodeHeights[Child] = getHeight(Child.getNode());
1998 CurrentWeight += Weight;
1999
2000 if (isTargetConstant(Child) && !GA.Value.getNode())
2001 GA = WeightedLeaf(Child, Weight, InsertionOrder++);
2002 else
2003 Leaves.push(WeightedLeaf(Child, Weight, InsertionOrder++));
2004 } else {
2005 // CASE 3: Child is a subtree of same opcode
2006 // Visit children first, then flatten.
2007 unsigned ChildOpcode = Child.getOpcode();
2008 assert(ChildOpcode == NOpcode ||
2009 (NOpcode == ISD::MUL && ChildOpcode == ISD::SHL));
2010
2011 // Convert SHL to MUL
2012 SDValue Op1;
2013 if (ChildOpcode == ISD::SHL)
2014 Op1 = getMultiplierForSHL(Child.getNode());
2015 else
2016 Op1 = Child->getOperand(1);
2017
2018 if (!NodeHeights.count(Op1) || !NodeHeights.count(Child->getOperand(0))) {
2019 assert(!NodeHeights.count(Child) && "Parent visited before children?");
2020 // Visit children first, then re-visit this node
2021 Worklist.push_back(Child);
2022 Worklist.push_back(Op1);
2023 Worklist.push_back(Child->getOperand(0));
2024 } else {
2025 // Back at this node after visiting the children
2026 if (std::abs(NodeHeights[Op1] - NodeHeights[Child->getOperand(0)]) > 1)
2027 Imbalanced = true;
2028
2029 NodeHeights[Child] = std::max(NodeHeights[Op1],
2030 NodeHeights[Child->getOperand(0)]) + 1;
2031 }
2032 }
2033 }
2034
2035 LLVM_DEBUG(dbgs() << "--> Current height=" << NodeHeights[SDValue(N, 0)]
2036 << " weight=" << CurrentWeight
2037 << " imbalanced=" << Imbalanced << "\n");
2038
2039 // Transform MUL(x, C * 2^Y) + SHL(z, Y) -> SHL(ADD(MUL(x, C), z), Y)
2040 // This factors out a shift in order to match memw(a<<Y+b).
2041 if (CanFactorize && (willShiftRightEliminate(Mul1.Value, MaxPowerOf2) ||
2042 willShiftRightEliminate(Mul2.Value, MaxPowerOf2))) {
2043 LLVM_DEBUG(dbgs() << "--> Found common factor for two MUL children!\n");
2044 int Weight = Mul1.Weight + Mul2.Weight;
2045 int Height = std::max(NodeHeights[Mul1.Value], NodeHeights[Mul2.Value]) + 1;
2046 SDValue Mul1Factored = factorOutPowerOf2(Mul1.Value, MaxPowerOf2);
2047 SDValue Mul2Factored = factorOutPowerOf2(Mul2.Value, MaxPowerOf2);
2048 SDValue Sum = CurDAG->getNode(ISD::ADD, SDLoc(N), Mul1.Value.getValueType(),
2049 Mul1Factored, Mul2Factored);
2050 SDValue Const = CurDAG->getConstant(MaxPowerOf2, SDLoc(N),
2051 Mul1.Value.getValueType());
2052 SDValue New = CurDAG->getNode(ISD::SHL, SDLoc(N), Mul1.Value.getValueType(),
2053 Sum, Const);
2054 NodeHeights[New] = Height;
2055 Leaves.push(WeightedLeaf(New, Weight, Mul1.InsertionOrder));
2056 } else if (Mul1.Value.getNode()) {
2057 // We failed to factorize two MULs, so now the Muls are left outside the
2058 // queue... add them back.
2059 Leaves.push(Mul1);
2060 if (Mul2.Value.getNode())
2061 Leaves.push(Mul2);
2062 CanFactorize = false;
2063 }
2064
2065 // Combine GA + Constant -> GA+Offset, but only if GA is not used elsewhere
2066 // and the root node itself is not used more than twice. This reduces the
2067 // amount of additional constant extenders introduced by this optimization.
2068 bool CombinedGA = false;
2069 if (NOpcode == ISD::ADD && GA.Value.getNode() && Leaves.hasConst() &&
2070 GA.Value.hasOneUse() && N->use_size() < 3) {
2071 GlobalAddressSDNode *GANode =
2072 cast<GlobalAddressSDNode>(GA.Value.getOperand(0));
2073 ConstantSDNode *Offset = cast<ConstantSDNode>(Leaves.top().Value);
2074
2075 if (getUsesInFunction(GANode->getGlobal()) == 1 && Offset->hasOneUse() &&
2076 getTargetLowering()->isOffsetFoldingLegal(GANode)) {
2077 LLVM_DEBUG(dbgs() << "--> Combining GA and offset ("
2078 << Offset->getSExtValue() << "): ");
2079 LLVM_DEBUG(GANode->dump(CurDAG));
2080
2081 SDValue NewTGA =
2082 CurDAG->getTargetGlobalAddress(GANode->getGlobal(), SDLoc(GA.Value),
2083 GANode->getValueType(0),
2084 GANode->getOffset() + (uint64_t)Offset->getSExtValue());
2085 GA.Value = CurDAG->getNode(GA.Value.getOpcode(), SDLoc(GA.Value),
2086 GA.Value.getValueType(), NewTGA);
2087 GA.Weight += Leaves.top().Weight;
2088
2089 NodeHeights[GA.Value] = getHeight(GA.Value.getNode());
2090 CombinedGA = true;
2091
2092 Leaves.pop(); // Remove the offset constant from the queue
2093 }
2094 }
2095
2096 if ((RebalanceOnlyForOptimizations && !CanFactorize && !CombinedGA) ||
2097 (RebalanceOnlyImbalancedTrees && !Imbalanced)) {
2098 RootWeights[N] = CurrentWeight;
2099 RootHeights[N] = NodeHeights[SDValue(N, 0)];
2100
2101 return SDValue(N, 0);
2102 }
2103
2104 // Combine GA + SHL(x, C<=31) so we will match Rx=add(#u8,asl(Rx,#U5))
2105 if (NOpcode == ISD::ADD && GA.Value.getNode()) {
2106 WeightedLeaf SHL = Leaves.findSHL(31);
2107 if (SHL.Value.getNode()) {
2108 int Height = std::max(NodeHeights[GA.Value], NodeHeights[SHL.Value]) + 1;
2109 GA.Value = CurDAG->getNode(ISD::ADD, SDLoc(GA.Value),
2110 GA.Value.getValueType(),
2111 GA.Value, SHL.Value);
2112 GA.Weight = SHL.Weight; // Specifically ignore the GA weight here
2113 NodeHeights[GA.Value] = Height;
2114 }
2115 }
2116
2117 if (GA.Value.getNode())
2118 Leaves.push(GA);
2119
2120 // If this is the top level and we haven't factored out a shift, we should try
2121 // to move a constant to the bottom to match addressing modes like memw(rX+C)
2122 if (TopLevel && !CanFactorize && Leaves.hasConst()) {
2123 LLVM_DEBUG(dbgs() << "--> Pushing constant to tip of tree.");
2124 Leaves.pushToBottom(Leaves.pop());
2125 }
2126
2127 const DataLayout &DL = CurDAG->getDataLayout();
2128 const TargetLowering &TLI = *getTargetLowering();
2129
2130 // Rebuild the tree using Huffman's algorithm
2131 while (Leaves.size() > 1) {
2132 WeightedLeaf L0 = Leaves.pop();
2133
2134 // See whether we can grab a MUL to form an add(Rx,mpyi(Ry,#u6)),
2135 // otherwise just get the next leaf
2136 WeightedLeaf L1 = Leaves.findMULbyConst();
2137 if (!L1.Value.getNode())
2138 L1 = Leaves.pop();
2139
2140 assert(L0.Weight <= L1.Weight && "Priority queue is broken!");
2141
2142 SDValue V0 = L0.Value;
2143 int V0Weight = L0.Weight;
2144 SDValue V1 = L1.Value;
2145 int V1Weight = L1.Weight;
2146
2147 // Make sure that none of these nodes have been RAUW'd
2148 if ((RootWeights.count(V0.getNode()) && RootWeights[V0.getNode()] == -2) ||
2149 (RootWeights.count(V1.getNode()) && RootWeights[V1.getNode()] == -2)) {
2150 LLVM_DEBUG(dbgs() << "--> Subtree was RAUWd. Restarting...\n");
2151 return balanceSubTree(N, TopLevel);
2152 }
2153
2154 ConstantSDNode *V0C = dyn_cast<ConstantSDNode>(V0);
2155 ConstantSDNode *V1C = dyn_cast<ConstantSDNode>(V1);
2156 EVT VT = N->getValueType(0);
2157 SDValue NewNode;
2158
2159 if (V0C && !V1C) {
2160 std::swap(V0, V1);
2161 std::swap(V0C, V1C);
2162 }
2163
2164 // Calculate height of this node
2165 assert(NodeHeights.count(V0) && NodeHeights.count(V1) &&
2166 "Children must have been visited before re-combining them!");
2167 int Height = std::max(NodeHeights[V0], NodeHeights[V1]) + 1;
2168
2169 // Rebuild this node (and restore SHL from MUL if needed)
2170 if (V1C && NOpcode == ISD::MUL && V1C->getAPIntValue().isPowerOf2())
2171 NewNode = CurDAG->getNode(
2172 ISD::SHL, SDLoc(V0), VT, V0,
2173 CurDAG->getConstant(
2174 V1C->getAPIntValue().logBase2(), SDLoc(N),
2175 TLI.getScalarShiftAmountTy(DL, V0.getValueType())));
2176 else
2177 NewNode = CurDAG->getNode(NOpcode, SDLoc(N), VT, V0, V1);
2178
2179 NodeHeights[NewNode] = Height;
2180
2181 int Weight = V0Weight + V1Weight;
2182 Leaves.push(WeightedLeaf(NewNode, Weight, L0.InsertionOrder));
2183
2184 LLVM_DEBUG(dbgs() << "--> Built new node (Weight=" << Weight
2185 << ",Height=" << Height << "):\n");
2186 LLVM_DEBUG(NewNode.dump());
2187 }
2188
2189 assert(Leaves.size() == 1);
2190 SDValue NewRoot = Leaves.top().Value;
2191
2192 assert(NodeHeights.count(NewRoot));
2193 int Height = NodeHeights[NewRoot];
2194
2195 // Restore SHL if we earlier converted it to a MUL
2196 if (NewRoot.getOpcode() == ISD::MUL) {
2197 ConstantSDNode *V1C = dyn_cast<ConstantSDNode>(NewRoot.getOperand(1));
2198 if (V1C && V1C->getAPIntValue().isPowerOf2()) {
2199 EVT VT = NewRoot.getValueType();
2200 SDValue V0 = NewRoot.getOperand(0);
2201 NewRoot = CurDAG->getNode(
2202 ISD::SHL, SDLoc(NewRoot), VT, V0,
2203 CurDAG->getConstant(
2204 V1C->getAPIntValue().logBase2(), SDLoc(NewRoot),
2205 TLI.getScalarShiftAmountTy(DL, V0.getValueType())));
2206 }
2207 }
2208
2209 if (N != NewRoot.getNode()) {
2210 LLVM_DEBUG(dbgs() << "--> Root is now: ");
2211 LLVM_DEBUG(NewRoot.dump());
2212
2213 // Replace all uses of old root by new root
2214 CurDAG->ReplaceAllUsesWith(N, NewRoot.getNode());
2215 // Mark that we have RAUW'd N
2216 RootWeights[N] = -2;
2217 } else {
2218 LLVM_DEBUG(dbgs() << "--> Root unchanged.\n");
2219 }
2220
2221 RootWeights[NewRoot.getNode()] = Leaves.top().Weight;
2222 RootHeights[NewRoot.getNode()] = Height;
2223
2224 return NewRoot;
2225}
2226
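// [Editor's sketch -- not part of the original file] The Huffman-style rebuild
// in balanceSubTree repeatedly combines the two lightest leaves into a node
// whose weight is their sum, so heavier subtrees end up closer to the root.
// A standalone illustration with plain ints (needs <functional>, <queue> and
// <vector>; function name hypothetical):
#if 0
static int huffmanCombineSketch(std::vector<int> Weights) {
  std::priority_queue<int, std::vector<int>, std::greater<int>> Q(
      Weights.begin(), Weights.end());
  int LastCombined = 0;
  while (Q.size() > 1) {
    int A = Q.top(); Q.pop();
    int B = Q.top(); Q.pop();
    LastCombined = A + B; // weight of the newly built interior node
    Q.push(LastCombined);
  }
  return LastCombined; // final root weight, e.g. {1, 1, 2, 4} -> 8
}
#endif
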
2227void HexagonDAGToDAGISel::rebalanceAddressTrees() {
2228 for (auto I = CurDAG->allnodes_begin(), E = CurDAG->allnodes_end(); I != E;) {
2229 SDNode *N = &*I++;
2230 if (N->getOpcode() != ISD::LOAD && N->getOpcode() != ISD::STORE)
2231 continue;
2232
2233 SDValue BasePtr = cast<MemSDNode>(N)->getBasePtr();
2234 if (BasePtr.getOpcode() != ISD::ADD)
2235 continue;
2236
2237 // We've already processed this node
2238 if (RootWeights.count(BasePtr.getNode()))
2239 continue;
2240
2241 LLVM_DEBUG(dbgs() << "** Rebalancing address calculation in node: ");
2242 LLVM_DEBUG(N->dump(CurDAG));
2243
2244 // FindRoots
2245 SmallVector<SDNode *, 4> Worklist;
2246
2247 Worklist.push_back(BasePtr.getOperand(0).getNode());
2248 Worklist.push_back(BasePtr.getOperand(1).getNode());
2249
2250 while (!Worklist.empty()) {
2251 SDNode *N = Worklist.pop_back_val();
2252 unsigned Opcode = N->getOpcode();
2253
2254 if (!isOpcodeHandled(N))
2255 continue;
2256
2257 Worklist.push_back(N->getOperand(0).getNode());
2258 Worklist.push_back(N->getOperand(1).getNode());
2259
2260 // Not a root if it has only one use and same opcode as its parent
2261 if (N->hasOneUse() && Opcode == N->use_begin()->getOpcode())
2262 continue;
2263
2264 // This root node has already been processed
2265 if (RootWeights.count(N))
2266 continue;
2267
2268 RootWeights[N] = -1;
2269 }
2270
2271 // Balance node itself
2272 RootWeights[BasePtr.getNode()] = -1;
2273 SDValue NewBasePtr = balanceSubTree(BasePtr.getNode(), /*TopLevel=*/ true);
2274
2275 if (N->getOpcode() == ISD::LOAD)
2276 N = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
2277 NewBasePtr, N->getOperand(2));
2278 else
2279 N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), N->getOperand(1),
2280 NewBasePtr, N->getOperand(3));
2281
2282 LLVM_DEBUG(dbgs() << "--> Final node: ");
2283 LLVM_DEBUG(N->dump(CurDAG));
2284 }
2285
2286 CurDAG->RemoveDeadNodes();
2287 GAUsesInFunction.clear();
2288 RootHeights.clear();
2289 RootWeights.clear();
2290}

/build/llvm-toolchain-snapshot-8~svn345461/include/llvm/Support/MathExtras.h

1//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains some functions that are useful for math stuff.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_MATHEXTRAS_H
15#define LLVM_SUPPORT_MATHEXTRAS_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/SwapByteOrder.h"
19#include <algorithm>
20#include <cassert>
21#include <climits>
22#include <cstring>
23#include <limits>
24#include <type_traits>
25
26#ifdef __ANDROID_NDK__
27#include <android/api-level.h>
28#endif
29
30#ifdef _MSC_VER
31// Declare these intrinsics manually rather than including intrin.h. It's very
32// expensive, and MathExtras.h is popular.
33// #include <intrin.h>
34extern "C" {
35unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
36unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
37unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
38unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
39}
40#endif
41
42namespace llvm {
43/// The behavior an operation has on an input of 0.
44enum ZeroBehavior {
45 /// The returned value is undefined.
46 ZB_Undefined,
47 /// The returned value is numeric_limits<T>::max()
48 ZB_Max,
49 /// The returned value is numeric_limits<T>::digits
50 ZB_Width
51};
52
53namespace detail {
54template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
55 static std::size_t count(T Val, ZeroBehavior) {
56 if (!Val)
57 return std::numeric_limits<T>::digits;
58 if (Val & 0x1)
59 return 0;
60
61 // Bisection method.
62 std::size_t ZeroBits = 0;
63 T Shift = std::numeric_limits<T>::digits >> 1;
64 T Mask = std::numeric_limits<T>::max() >> Shift;
65 while (Shift) {
66 if ((Val & Mask) == 0) {
67 Val >>= Shift;
68 ZeroBits |= Shift;
69 }
70 Shift >>= 1;
71 Mask >>= Shift;
72 }
73 return ZeroBits;
74 }
75};
76
77#if __GNUC__ >= 4 || defined(_MSC_VER)
78template <typename T> struct TrailingZerosCounter<T, 4> {
79 static std::size_t count(T Val, ZeroBehavior ZB) {
80 if (ZB != ZB_Undefined && Val == 0)
 [18] Assuming 'Val' is equal to 0
 [19] Taking true branch
81 return 32;
 [20] Returning the value 32
82
83#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
84 return __builtin_ctz(Val);
85#elif defined(_MSC_VER)
86 unsigned long Index;
87 _BitScanForward(&Index, Val);
88 return Index;
89#endif
90 }
91};
92
93#if !defined(_MSC_VER) || defined(_M_X64)
94template <typename T> struct TrailingZerosCounter<T, 8> {
95 static std::size_t count(T Val, ZeroBehavior ZB) {
96 if (ZB != ZB_Undefined && Val == 0)
97 return 64;
98
99#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
100 return __builtin_ctzll(Val);
101#elif defined(_MSC_VER)
102 unsigned long Index;
103 _BitScanForward64(&Index, Val);
104 return Index;
105#endif
106 }
107};
108#endif
109#endif
110} // namespace detail
111
112/// Count number of 0's from the least significant bit to the most
113/// stopping at the first 1.
114///
115/// Only unsigned integral types are allowed.
116///
117/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
118/// valid arguments.
119template <typename T>
120std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
121 static_assert(std::numeric_limits<T>::is_integer &&
122 !std::numeric_limits<T>::is_signed,
123 "Only unsigned integral types are allowed.");
124 return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
 [17] Calling 'TrailingZerosCounter::count'
 [21] Returning from 'TrailingZerosCounter::count'
 [22] Returning the value 32
125}
126
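// [Editor's note -- not part of the original header] With ZB_Width (the
// default), countTrailingZeros(uint32_t(0)) returns 32, and 32 is exactly the
// shift amount the analyzer flags at HexagonISelDAGToDAG.cpp:1123 in this
// report: shifting a uint32_t right by 32 is undefined. A guarded use looks
// like this (helper name hypothetical):
inline uint32_t shiftOutTrailingZerosSketch(uint32_t V) {
  unsigned TZ = countTrailingZeros(V); // 32 when V == 0 under ZB_Width
  return TZ < 32 ? (V >> TZ) : 0;      // never shift a 32-bit value by 32
}
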
127namespace detail {
128template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
129 static std::size_t count(T Val, ZeroBehavior) {
130 if (!Val)
131 return std::numeric_limits<T>::digits;
132
133 // Bisection method.
134 std::size_t ZeroBits = 0;
135 for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
136 T Tmp = Val >> Shift;
137 if (Tmp)
138 Val = Tmp;
139 else
140 ZeroBits |= Shift;
141 }
142 return ZeroBits;
143 }
144};
145
146#if __GNUC__ >= 4 || defined(_MSC_VER)
147template <typename T> struct LeadingZerosCounter<T, 4> {
148 static std::size_t count(T Val, ZeroBehavior ZB) {
149 if (ZB != ZB_Undefined && Val == 0)
150 return 32;
151
152#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
153 return __builtin_clz(Val);
154#elif defined(_MSC_VER)
155 unsigned long Index;
156 _BitScanReverse(&Index, Val);
157 return Index ^ 31;
158#endif
159 }
160};
161
162#if !defined(_MSC_VER) || defined(_M_X64)
163template <typename T> struct LeadingZerosCounter<T, 8> {
164 static std::size_t count(T Val, ZeroBehavior ZB) {
165 if (ZB != ZB_Undefined && Val == 0)
166 return 64;
167
168#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
169 return __builtin_clzll(Val);
170#elif defined(_MSC_VER)
171 unsigned long Index;
172 _BitScanReverse64(&Index, Val);
173 return Index ^ 63;
174#endif
175 }
176};
177#endif
178#endif
179} // namespace detail
180
181/// Count number of 0's from the most significant bit to the least
182/// stopping at the first 1.
183///
184/// Only unsigned integral types are allowed.
185///
186/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
187/// valid arguments.
188template <typename T>
189std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
190 static_assert(std::numeric_limits<T>::is_integer &&
191 !std::numeric_limits<T>::is_signed,
192 "Only unsigned integral types are allowed.");
193 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
194}
195
196/// Get the index of the first set bit starting from the least
197/// significant bit.
198///
199/// Only unsigned integral types are allowed.
200///
201/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
202/// valid arguments.
203template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
204 if (ZB == ZB_Max && Val == 0)
205 return std::numeric_limits<T>::max();
206
207 return countTrailingZeros(Val, ZB_Undefined);
208}
209
210/// Create a bitmask with the N right-most bits set to 1, and all other
211/// bits set to 0. Only unsigned types are allowed.
212template <typename T> T maskTrailingOnes(unsigned N) {
213 static_assert(std::is_unsigned<T>::value, "Invalid type!");
214 const unsigned Bits = CHAR_BIT * sizeof(T);
215 assert(N <= Bits && "Invalid bit index");
216 return N == 0 ? 0 : (T(-1) >> (Bits - N));
217}
218
219/// Create a bitmask with the N left-most bits set to 1, and all other
220/// bits set to 0. Only unsigned types are allowed.
221template <typename T> T maskLeadingOnes(unsigned N) {
222 return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
223}
224
225/// Create a bitmask with the N right-most bits set to 0, and all other
226/// bits set to 1. Only unsigned types are allowed.
227template <typename T> T maskTrailingZeros(unsigned N) {
228 return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
229}
230
231/// Create a bitmask with the N left-most bits set to 0, and all other
232/// bits set to 1. Only unsigned types are allowed.
233template <typename T> T maskLeadingZeros(unsigned N) {
234 return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
235}
236
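// [Editor's example -- not part of the original header] Sample values for the
// four mask helpers above (function name hypothetical; run-time checks because
// these helpers are not constexpr in this revision):
inline void maskExamplesSketch() {
  assert(maskTrailingOnes<uint32_t>(8)  == 0x000000FFu); // low 8 bits set
  assert(maskLeadingOnes<uint32_t>(8)   == 0xFF000000u); // high 8 bits set
  assert(maskTrailingZeros<uint32_t>(8) == 0xFFFFFF00u); // low 8 bits clear
  assert(maskLeadingZeros<uint32_t>(8)  == 0x00FFFFFFu); // high 8 bits clear
}
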
237/// Get the index of the last set bit starting from the least
238/// significant bit.
239///
240/// Only unsigned integral types are allowed.
241///
242/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
243/// valid arguments.
244template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
245 if (ZB == ZB_Max && Val == 0)
246 return std::numeric_limits<T>::max();
247
248 // Use ^ instead of - because both gcc and llvm can remove the associated ^
249 // in the __builtin_clz intrinsic on x86.
250 return countLeadingZeros(Val, ZB_Undefined) ^
251 (std::numeric_limits<T>::digits - 1);
252}
253
254/// Macro compressed bit reversal table for 256 bits.
255///
256/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
257static const unsigned char BitReverseTable256[256] = {
258#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
259#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
260#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
261 R6(0), R6(2), R6(1), R6(3)
262#undef R2
263#undef R4
264#undef R6
265};
266
267/// Reverse the bits in \p Val.
268template <typename T>
269T reverseBits(T Val) {
270 unsigned char in[sizeof(Val)];
271 unsigned char out[sizeof(Val)];
272 std::memcpy(in, &Val, sizeof(Val));
273 for (unsigned i = 0; i < sizeof(Val); ++i)
274 out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
275 std::memcpy(&Val, out, sizeof(Val));
276 return Val;
277}
278
279// NOTE: The following support functions use the _32/_64 extensions instead of
280// type overloading so that signed and unsigned integers can be used without
281// ambiguity.
282
283/// Return the high 32 bits of a 64 bit value.
284constexpr inline uint32_t Hi_32(uint64_t Value) {
285 return static_cast<uint32_t>(Value >> 32);
286}
287
288/// Return the low 32 bits of a 64 bit value.
289constexpr inline uint32_t Lo_32(uint64_t Value) {
290 return static_cast<uint32_t>(Value);
291}
292
293/// Make a 64-bit integer from a high / low pair of 32-bit integers.
294constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
295 return ((uint64_t)High << 32) | (uint64_t)Low;
296}
297
298/// Checks if an integer fits into the given bit width.
299template <unsigned N> constexpr inline bool isInt(int64_t x) {
300 return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
301}
302// Template specializations to get better code for common cases.
303template <> constexpr inline bool isInt<8>(int64_t x) {
304 return static_cast<int8_t>(x) == x;
305}
306template <> constexpr inline bool isInt<16>(int64_t x) {
307 return static_cast<int16_t>(x) == x;
308}
309template <> constexpr inline bool isInt<32>(int64_t x) {
310 return static_cast<int32_t>(x) == x;
311}
312
313/// Checks if a signed integer is an N bit number shifted left by S.
314template <unsigned N, unsigned S>
315constexpr inline bool isShiftedInt(int64_t x) {
316 static_assert(
317 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
318 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
319 return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
320}
321
322/// Checks if an unsigned integer fits into the given bit width.
323///
324/// This is written as two functions rather than as simply
325///
326/// return N >= 64 || X < (UINT64_C(1) << N);
327///
328/// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting
329/// left too many places.
330template <unsigned N>
331constexpr inline typename std::enable_if<(N < 64), bool>::type
332isUInt(uint64_t X) {
333 static_assert(N > 0, "isUInt<0> doesn't make sense");
334 return X < (UINT64_C(1) << (N));
335}
336template <unsigned N>
337constexpr inline typename std::enable_if<N >= 64, bool>::type
338isUInt(uint64_t X) {
339 return true;
340}
341
342// Template specializations to get better code for common cases.
343template <> constexpr inline bool isUInt<8>(uint64_t x) {
344 return static_cast<uint8_t>(x) == x;
345}
346template <> constexpr inline bool isUInt<16>(uint64_t x) {
347 return static_cast<uint16_t>(x) == x;
348}
349template <> constexpr inline bool isUInt<32>(uint64_t x) {
350 return static_cast<uint32_t>(x) == x;
351}
352
353/// Checks if an unsigned integer is an N bit number shifted left by S.
354template <unsigned N, unsigned S>
355constexpr inline bool isShiftedUInt(uint64_t x) {
356 static_assert(
357 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
358 static_assert(N + S <= 64,
359 "isShiftedUInt<N, S> with N + S > 64 is too wide.");
360 // Per the two static_asserts above, S must be strictly less than 64. So
361 // 1 << S is not undefined behavior.
362 return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
363}
364
365/// Gets the maximum value for a N-bit unsigned integer.
366inline uint64_t maxUIntN(uint64_t N) {
367 assert(N > 0 && N <= 64 && "integer width out of range");
368
369 // uint64_t(1) << 64 is undefined behavior, so we can't do
370 // (uint64_t(1) << N) - 1
371 // without checking first that N != 64. But this works and doesn't have a
372 // branch.
373 return UINT64_MAX >> (64 - N);
374}
375
376/// Gets the minimum value for a N-bit signed integer.
377inline int64_t minIntN(int64_t N) {
378 assert(N > 0 && N <= 64 && "integer width out of range");
379
380 return -(UINT64_C(1)<<(N-1));
381}
382
383/// Gets the maximum value for a N-bit signed integer.
384inline int64_t maxIntN(int64_t N) {
385 assert(N > 0 && N <= 64 && "integer width out of range");
386
387 // This relies on two's complement wraparound when N == 64, so we convert to
388 // int64_t only at the very end to avoid UB.
389 return (UINT64_C(1) << (N - 1)) - 1;
390}
391
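// [Editor's example -- not part of the original header] Sample values for the
// three range helpers above (function name hypothetical):
inline void intRangeExamplesSketch() {
  assert(maxUIntN(8) == 255);
  assert(minIntN(8) == -128);
  assert(maxIntN(8) == 127);
}
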
392/// Checks if an unsigned integer fits into the given (dynamic) bit width.
393inline bool isUIntN(unsigned N, uint64_t x) {
394 return N >= 64 || x <= maxUIntN(N);
395}
396
397/// Checks if a signed integer fits into the given (dynamic) bit width.
398inline bool isIntN(unsigned N, int64_t x) {
399 return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
400}
401
402/// Return true if the argument is a non-empty sequence of ones starting at the
403/// least significant bit with the remainder zero (32 bit version).
404/// Ex. isMask_32(0x0000FFFFU) == true.
405constexpr inline bool isMask_32(uint32_t Value) {
406 return Value && ((Value + 1) & Value) == 0;
407}
408
409/// Return true if the argument is a non-empty sequence of ones starting at the
410/// least significant bit with the remainder zero (64 bit version).
411constexpr inline bool isMask_64(uint64_t Value) {
412 return Value && ((Value + 1) & Value) == 0;
413}
414
415/// Return true if the argument contains a non-empty sequence of ones with the
416/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
417constexpr inline bool isShiftedMask_32(uint32_t Value) {
418 return Value && isMask_32((Value - 1) | Value);
419}
420
421/// Return true if the argument contains a non-empty sequence of ones with the
422/// remainder zero (64 bit version.)
423constexpr inline bool isShiftedMask_64(uint64_t Value) {
424 return Value && isMask_64((Value - 1) | Value);
425}
426
427/// Return true if the argument is a power of two > 0.
428/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
429constexpr inline bool isPowerOf2_32(uint32_t Value) {
430 return Value && !(Value & (Value - 1));
431}
432
433/// Return true if the argument is a power of two > 0 (64 bit edition.)
434constexpr inline bool isPowerOf2_64(uint64_t Value) {
435 return Value && !(Value & (Value - 1));
436}
437
438/// Return a byte-swapped representation of the 16-bit argument.
439inline uint16_t ByteSwap_16(uint16_t Value) {
440 return sys::SwapByteOrder_16(Value);
441}
442
443/// Return a byte-swapped representation of the 32-bit argument.
444inline uint32_t ByteSwap_32(uint32_t Value) {
445 return sys::SwapByteOrder_32(Value);
446}
447
448/// Return a byte-swapped representation of the 64-bit argument.
449inline uint64_t ByteSwap_64(uint64_t Value) {
450 return sys::SwapByteOrder_64(Value);
451}
452
453/// Count the number of ones from the most significant bit to the first
454/// zero bit.
455///
456/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
457/// Only unsigned integral types are allowed.
458///
459/// \param ZB the behavior on an input of all ones. Only ZB_Width and
460/// ZB_Undefined are valid arguments.
461template <typename T>
462std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
463 static_assert(std::numeric_limits<T>::is_integer &&
464 !std::numeric_limits<T>::is_signed,
465 "Only unsigned integral types are allowed.");
466 return countLeadingZeros<T>(~Value, ZB);
467}
468
469/// Count the number of ones from the least significant bit to the first
470/// zero bit.
471///
472/// Ex. countTrailingOnes(0x00FF00FF) == 8.
473/// Only unsigned integral types are allowed.
474///
475/// \param ZB the behavior on an input of all ones. Only ZB_Width and
476/// ZB_Undefined are valid arguments.
477template <typename T>
478std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
479 static_assert(std::numeric_limits<T>::is_integer &&
480 !std::numeric_limits<T>::is_signed,
481 "Only unsigned integral types are allowed.");
482 return countTrailingZeros<T>(~Value, ZB);
483}
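A short sketch of the ones-counting helpers, matching the examples in the comments above (not part of the header; the wrapper name is hypothetical):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkOneRuns() {
    assert(llvm::countLeadingOnes(0xFF0FFF00u) == 8);   // eight leading ones
    assert(llvm::countTrailingOnes(0x00FF00FFu) == 8);  // eight trailing ones
    assert(llvm::countTrailingOnes(0u) == 0);
  }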
484
485namespace detail {
486template <typename T, std::size_t SizeOfT> struct PopulationCounter {
487 static unsigned count(T Value) {
488 // Generic version, forward to 32 bits.
489 static_assert(SizeOfT <= 4, "Not implemented!");
490#if __GNUC__ >= 4
491 return __builtin_popcount(Value);
492#else
493 uint32_t v = Value;
494 v = v - ((v >> 1) & 0x55555555);
495 v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
496 return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
497#endif
498 }
499};
500
501template <typename T> struct PopulationCounter<T, 8> {
502 static unsigned count(T Value) {
503#if __GNUC__ >= 4
504 return __builtin_popcountll(Value);
505#else
506 uint64_t v = Value;
507 v = v - ((v >> 1) & 0x5555555555555555ULL);
508 v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
509 v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
510 return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
511#endif
512 }
513};
514} // namespace detail
515
516/// Count the number of set bits in a value.
517/// Ex. countPopulation(0xF000F000) = 8
518/// Returns 0 if the word is zero.
519template <typename T>
520inline unsigned countPopulation(T Value) {
521 static_assert(std::numeric_limits<T>::is_integer &&
522 !std::numeric_limits<T>::is_signed,
523 "Only unsigned integral types are allowed.");
524 return detail::PopulationCounter<T, sizeof(T)>::count(Value);
525}
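A minimal popcount sketch (illustrative only), using unsigned arguments as the static_assert above requires:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkPopCount() {
    assert(llvm::countPopulation(0xF000F000u) == 8);
    assert(llvm::countPopulation(0u) == 0);
    assert(llvm::countPopulation(~0ull) == 64);
  }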
526
527/// Return the log base 2 of the specified value.
528inline double Log2(double Value) {
529#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
530 return __builtin_log(Value) / __builtin_log(2.0);
531#else
532 return log2(Value);
533#endif
534}
535
536/// Return the floor log base 2 of the specified value, -1 if the value is zero.
537/// (32 bit edition.)
538/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
539inline unsigned Log2_32(uint32_t Value) {
540 return 31 - countLeadingZeros(Value);
541}
542
543/// Return the floor log base 2 of the specified value, -1 if the value is zero.
544/// (64 bit edition.)
545inline unsigned Log2_64(uint64_t Value) {
546 return 63 - countLeadingZeros(Value);
547}
548
549/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
550/// (32 bit edition).
551/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
552inline unsigned Log2_32_Ceil(uint32_t Value) {
553 return 32 - countLeadingZeros(Value - 1);
554}
555
556/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
557/// (64 bit edition.)
558inline unsigned Log2_64_Ceil(uint64_t Value) {
559 return 64 - countLeadingZeros(Value - 1);
560}
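A hedged sketch of the integer log2 helpers; note that because the return type is unsigned, the -1 documented for a zero input comes back as the all-ones unsigned value, so the examples below stick to non-zero arguments:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkLog2() {
    assert(llvm::Log2_32(32) == 5 && llvm::Log2_32(6) == 2);         // floor
    assert(llvm::Log2_32_Ceil(6) == 3 && llvm::Log2_32_Ceil(32) == 5); // ceil
    assert(llvm::Log2_64(1ull << 40) == 40);
  }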
561
562/// Return the greatest common divisor of the values using Euclid's algorithm.
563inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
564 while (B) {
565 uint64_t T = B;
566 B = A % B;
567 A = T;
568 }
569 return A;
570}
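A quick Euclid's-algorithm sketch (names invented for illustration):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkGcd() {
    assert(llvm::GreatestCommonDivisor64(48, 36) == 12);
    assert(llvm::GreatestCommonDivisor64(7, 0) == 7);   // gcd(n, 0) == n
  }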
571
572/// This function takes a 64-bit integer and returns the bit equivalent double.
573inline double BitsToDouble(uint64_t Bits) {
574 double D;
575 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
576 memcpy(&D, &Bits, sizeof(Bits));
577 return D;
578}
579
580/// This function takes a 32-bit integer and returns the bit equivalent float.
581inline float BitsToFloat(uint32_t Bits) {
582 float F;
583 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
584 memcpy(&F, &Bits, sizeof(Bits));
585 return F;
586}
587
588/// This function takes a double and returns the bit equivalent 64-bit integer.
589/// Note that copying doubles around changes the bits of NaNs on some hosts,
590/// notably x86, so this routine cannot be used if these bits are needed.
591inline uint64_t DoubleToBits(double Double) {
592 uint64_t Bits;
593 static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
594 memcpy(&Bits, &Double, sizeof(Double));
595 return Bits;
596}
597
598/// This function takes a float and returns the bit equivalent 32-bit integer.
599/// Note that copying floats around changes the bits of NaNs on some hosts,
600/// notably x86, so this routine cannot be used if these bits are needed.
601inline uint32_t FloatToBits(float Float) {
602 uint32_t Bits;
603 static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
604 memcpy(&Bits, &Float, sizeof(Float));
605 return Bits;
606}
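A round-trip sketch of the bit-cast helpers, using well-known IEEE-754 encodings (illustrative only, not part of the header):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  #include <cstdint>
  static void checkBitCasts() {
    assert(llvm::FloatToBits(1.0f) == 0x3F800000u);      // single-precision 1.0
    assert(llvm::BitsToFloat(0x3F800000u) == 1.0f);      // and back
    assert(llvm::DoubleToBits(1.0) == UINT64_C(0x3FF0000000000000));
  }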
607
608/// A and B are either alignments or offsets. Return the minimum alignment that
609/// may be assumed after adding the two together.
610constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
611 // The largest power of 2 that divides both A and B.
612 //
613 // Replace "-Value" by "1+~Value" in the following commented code to avoid
614 // MSVC warning C4146
615 // return (A | B) & -(A | B);
616 return (A | B) & (1 + ~(A | B));
617}
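MinAlign is constexpr, so a compile-time sketch works (illustrative only):

  #include "llvm/Support/MathExtras.h"
  static_assert(llvm::MinAlign(8, 4) == 4, "limited by the smaller alignment");
  static_assert(llvm::MinAlign(16, 24) == 8, "largest power of two dividing both");
  static_assert(llvm::MinAlign(12, 20) == 4, "offsets behave the same way");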
618
619/// Aligns \c Addr to \c Alignment bytes, rounding up.
620///
621/// Alignment should be a power of two. This method rounds up, so
622/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
623inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
624 assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
625 "Alignment is not a power of two!");
626
627 assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
628
629 return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
630}
631
632/// Returns the necessary adjustment for aligning \c Ptr to \c Alignment
633/// bytes, rounding up.
634inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
635 return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
636}
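A pointer-alignment sketch matching the alignAddr(7, 4) example in the comment above (the wrapper name and the synthetic pointer are for illustration only):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  #include <cstdint>
  static void checkAlignAddr() {
    const void *P = reinterpret_cast<const void *>(uintptr_t(7));
    assert(llvm::alignAddr(P, 4) == 8);             // rounds 7 up to 8
    assert(llvm::alignmentAdjustment(P, 4) == 1);   // one byte of padding needed
  }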
637
638/// Returns the next power of two (in 64-bits) that is strictly greater than A.
639/// Returns zero on overflow.
640inline uint64_t NextPowerOf2(uint64_t A) {
641 A |= (A >> 1);
642 A |= (A >> 2);
643 A |= (A >> 4);
644 A |= (A >> 8);
645 A |= (A >> 16);
646 A |= (A >> 32);
647 return A + 1;
648}
649
650/// Returns the power of two which is less than or equal to the given value.
651/// Essentially, it is a floor operation across the domain of powers of two.
652inline uint64_t PowerOf2Floor(uint64_t A) {
653 if (!A) return 0;
654 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
655}
656
657/// Returns the power of two which is greater than or equal to the given value.
658/// Essentially, it is a ceil operation across the domain of powers of two.
659inline uint64_t PowerOf2Ceil(uint64_t A) {
660 if (!A)
661 return 0;
662 return NextPowerOf2(A - 1);
663}
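A sketch contrasting the three power-of-two rounding helpers (illustrative only): NextPowerOf2 is strictly greater than its input, while PowerOf2Ceil returns the input unchanged when it is already a power of two.

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkPow2Rounding() {
    assert(llvm::NextPowerOf2(8) == 16);   // strictly greater
    assert(llvm::PowerOf2Floor(5) == 4);
    assert(llvm::PowerOf2Ceil(5) == 8 && llvm::PowerOf2Ceil(8) == 8);
  }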
664
665/// Returns the next integer (mod 2**64) that is greater than or equal to
666/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
667///
668/// If non-zero \p Skew is specified, the return value will be a minimal
669/// integer that is greater than or equal to \p Value and equal to
670/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
671/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
672///
673/// Examples:
674/// \code
675/// alignTo(5, 8) = 8
676/// alignTo(17, 8) = 24
677/// alignTo(~0LL, 8) = 0
678/// alignTo(321, 255) = 510
679///
680/// alignTo(5, 8, 7) = 7
681/// alignTo(17, 8, 1) = 17
682/// alignTo(~0LL, 8, 3) = 3
683/// alignTo(321, 255, 42) = 552
684/// \endcode
685inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
686 assert(Align != 0u && "Align can't be 0.");
687 Skew %= Align;
688 return (Value + Align - 1 - Skew) / Align * Align + Skew;
689}
690
691/// Returns the next integer (mod 2**64) that is greater than or equal to
692/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
693template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
694 static_assert(Align != 0u, "Align must be non-zero");
695 return (Value + Align - 1) / Align * Align;
696}
697
698/// Returns the integer ceil(Numerator / Denominator).
699inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
700 return alignTo(Numerator, Denominator) / Denominator;
701}
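A sketch reproducing a few of the documented alignTo examples, plus divideCeil (the wrapper name is made up; the template overload is the constexpr flavor):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkAlignTo() {
    assert(llvm::alignTo(17, 8) == 24 && llvm::alignTo<8>(17) == 24);
    assert(llvm::alignTo(5, 8, 7) == 7);    // skewed: 0 * 8 + 7
    assert(llvm::divideCeil(17, 8) == 3);
  }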
702
703/// \c alignTo for contexts where a constant expression is required.
704/// \sa alignTo
705///
706/// \todo FIXME: remove when \c constexpr becomes really \c constexpr
707template <uint64_t Align>
708struct AlignTo {
709 static_assert(Align != 0u, "Align must be non-zero");
710 template <uint64_t Value>
711 struct from_value {
712 static const uint64_t value = (Value + Align - 1) / Align * Align;
713 };
714};
715
716/// Returns the largest uint64_t that is less than or equal to \p Value and
717/// is \p Skew mod \p Align. \p Align must be non-zero.
718inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
719 assert(Align != 0u && "Align can't be 0.");
720 Skew %= Align;
721 return (Value - Skew) / Align * Align + Skew;
722}
723
724/// Returns the offset to the next integer (mod 2**64) that is greater than
725/// or equal to \p Value and is a multiple of \p Align. \p Align must be
726/// non-zero.
727inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
728 return alignTo(Value, Align) - Value;
729}
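A companion sketch for rounding down and computing padding (illustrative only):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkAlignDown() {
    assert(llvm::alignDown(17, 8) == 16);
    assert(llvm::alignDown(17, 8, 1) == 17);      // already 1 mod 8
    assert(llvm::OffsetToAlignment(5, 8) == 3);   // 5 + 3 reaches the next multiple of 8
  }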
730
731/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
732/// Requires 0 < B <= 32.
733template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
734 static_assert(B > 0, "Bit width can't be 0.");
735 static_assert(B <= 32, "Bit width out of range.");
736 return int32_t(X << (32 - B)) >> (32 - B);
737}
738
739/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
740/// Requires 0 < B <= 32.
741inline int32_t SignExtend32(uint32_t X, unsigned B) {
742 assert(B > 0 && "Bit width can't be 0.");
743 assert(B <= 32 && "Bit width out of range.");
744 return int32_t(X << (32 - B)) >> (32 - B);
745}
746
747/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
748/// Requires 0 < B <= 64.
749template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
750 static_assert(B > 0, "Bit width can't be 0.");
751 static_assert(B <= 64, "Bit width out of range.");
752 return int64_t(x << (64 - B)) >> (64 - B);
753}
754
755/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
756/// Requires 0 < B <= 64.
757inline int64_t SignExtend64(uint64_t X, unsigned B) {
758 assert(B > 0 && "Bit width can't be 0.");
759 assert(B <= 64 && "Bit width out of range.");
760 return int64_t(X << (64 - B)) >> (64 - B);
761}
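The asserts above matter because X << (32 - B) is undefined when B is 0: a shift by the full type width, the same class of issue this report flags at HexagonISelDAGToDAG.cpp:1123. A sketch with valid widths (illustrative only):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  static void checkSignExtend() {
    assert(llvm::SignExtend32<8>(0xFFu) == -1);
    assert(llvm::SignExtend32(0x80u, 8) == -128);
    assert(llvm::SignExtend64(0xFFull, 8) == -1);
    // SignExtend32(X, 0) would shift by 32, which is undefined behaviour.
  }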
762
763/// Subtract two unsigned integers, X and Y, of type T and return the absolute
764/// value of the result.
765template <typename T>
766typename std::enable_if<std::is_unsigned<T>::value, T>::type
767AbsoluteDifference(T X, T Y) {
768 return std::max(X, Y) - std::min(X, Y);
769}
770
771/// Add two unsigned integers, X and Y, of type T. Clamp the result to the
772/// maximum representable value of T on overflow. ResultOverflowed indicates if
773/// the result is larger than the maximum representable value of type T.
774template <typename T>
775typename std::enable_if<std::is_unsigned<T>::value, T>::type
776SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
777 bool Dummy;
778 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
779 // Hacker's Delight, p. 29
780 T Z = X + Y;
781 Overflowed = (Z < X || Z < Y);
782 if (Overflowed)
783 return std::numeric_limits<T>::max();
784 else
785 return Z;
786}
787
788/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
789/// maximum representable value of T on overflow. ResultOverflowed indicates if
790/// the result is larger than the maximum representable value of type T.
791template <typename T>
792typename std::enable_if<std::is_unsigned<T>::value, T>::type
793SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
794 bool Dummy;
795 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
796
797 // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
798 // because it fails for uint16_t (where multiplication can have undefined
799 // behavior due to promotion to int), and requires a division in addition
800 // to the multiplication.
801
802 Overflowed = false;
803
804 // Log2(Z) would be either Log2Z or Log2Z + 1.
805 // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
806 // will necessarily be less than Log2Max as desired.
807 int Log2Z = Log2_64(X) + Log2_64(Y);
808 const T Max = std::numeric_limits<T>::max();
809 int Log2Max = Log2_64(Max);
810 if (Log2Z < Log2Max) {
811 return X * Y;
812 }
813 if (Log2Z > Log2Max) {
814 Overflowed = true;
815 return Max;
816 }
817
818 // We're going to use the top bit, and maybe overflow one
819 // bit past it. Multiply all but the bottom bit then add
820 // that on at the end.
821 T Z = (X >> 1) * Y;
822 if (Z & ~(Max >> 1)) {
823 Overflowed = true;
824 return Max;
825 }
826 Z <<= 1;
827 if (X & 1)
828 return SaturatingAdd(Z, Y, ResultOverflowed);
829
830 return Z;
831}
832
833/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
834/// the product. Clamp the result to the maximum representable value of T on
835/// overflow. ResultOverflowed indicates if the result is larger than the
836/// maximum representable value of type T.
837template <typename T>
838typename std::enable_if<std::is_unsigned<T>::value, T>::type
839SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
840 bool Dummy;
841 bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
842
843 T Product = SaturatingMultiply(X, Y, &Overflowed);
844 if (Overflowed)
845 return Product;
846
847 return SaturatingAdd(A, Product, &Overflowed);
848}
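A sketch of the saturating arithmetic helpers with a narrow type so overflow is easy to trigger (the wrapper name is invented for illustration):

  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  #include <cstdint>
  static void checkSaturating() {
    bool Ov = false;
    assert(llvm::SaturatingAdd<uint8_t>(200, 100, &Ov) == 255 && Ov);        // clamped
    assert(llvm::SaturatingMultiply<uint8_t>(16, 16, &Ov) == 255 && Ov);     // clamped
    assert(llvm::SaturatingMultiplyAdd<uint8_t>(10, 20, 30, &Ov) == 230 && !Ov);
  }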
849
850/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
851extern const float huge_valf;
852} // End llvm namespace
853
854#endif