#ifndef LLVM_ANALYSIS_REGIONITERATOR_H
#define LLVM_ANALYSIS_REGIONITERATOR_H

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Analysis/RegionInfo.h"
#include <cassert>
#include <iterator>
#include <type_traits>
/// Hierarchical RegionNode successor iterator.
template <class NodeRef, class BlockT, class RegionT>
class RNSuccIterator {
  using BlockTraits = GraphTraits<BlockT *>;
  using SuccIterTy = typename BlockTraits::ChildIteratorType;
  static_assert(std::is_pointer<NodeRef>::value,
                "FIXME: Currently RNSuccIterator only supports NodeRef as "
                "pointers due to the use of pointer-specific data structures "
                "(e.g. PointerIntPair and SmallPtrSet) internally. Generalize "
                "it to support non-pointer types");
  // Advance the mode flag past the region's single successor, once it has
  // been visited.
  void advanceRegionSucc() {
    assert(Node.getInt() == ItRgBegin && "Cannot advance region successor!");
    Node.setInt(ItRgEnd);
  }
  NodeRef getNode() const { return Node.getPointer(); }
  // Is the iterator currently in region mode, i.e. visiting a subregion node?
  bool isRegionMode() const { return Node.getInt() != ItBB; }
  // Get the immediate successor RegionNode for basic block BB. The result may
  // be a basic-block node or a subregion node.
  NodeRef getISucc(BlockT *BB) const {
    NodeRef succ;
    succ = getNode()->getParent()->getNode(BB);
    assert(succ && "BB not in Region or entered subregion!");
    return succ;
  }
  // Return the successor basic block of the current region.
  inline BlockT *getRegionSucc() const {
    assert(Node.getInt() == ItRgBegin && "Cannot get the region successor!");
    return getNode()->template getNodeAs<RegionT>()->getExit();
  }
  // Is BB the exit block of the iterated region?
  inline bool isExit(BlockT *BB) const {
    return getNode()->getParent()->getExit() == BB;
  }
  // In the begin-iterator constructor: skip the exit block, which is not a
  // successor inside the region.
  while (BlockTraits::child_end(node->getEntry()) != BItor && isExit(*BItor))
    ++BItor;

  if (isRegionMode() && isExit(getRegionSucc()))
    advanceRegionSucc();
  bool operator==(const Self &x) const {
    assert(isRegionMode() == x.isRegionMode() && "Broken iterator!");
    if (isRegionMode())
      return Node.getInt() == x.Node.getInt();
    else
      return BItor == x.BItor;
  }
  value_type operator*() const {
    BlockT *BB = isRegionMode() ? getRegionSucc() : *BItor;
    assert(!isExit(BB) && "Iterator out of range!");
    return getISucc(BB);
  }
    // In operator++: advance and keep skipping the exit block.
    do
      ++BItor;
    while (BItor != BlockTraits::child_end(getNode()->getEntry()) &&
           isExit(*BItor));
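For illustration (not part of the header): driving the hierarchical iterator
by hand, assuming the usual RegionNode/BasicBlock/Region instantiation;
visitSuccessors is a hypothetical helper name.

#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"

// Visit every successor RegionNode of RN within its parent region.
void visitSuccessors(llvm::RegionNode *RN) {
  using It =
      llvm::RNSuccIterator<llvm::RegionNode *, llvm::BasicBlock, llvm::Region>;
  for (It I(RN), E(RN, true); I != E; ++I) {
    llvm::RegionNode *Succ = *I; // A basic-block node or a subregion node.
    (void)Succ;
  }
}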
/// Flat RegionNode iterator.
template <class NodeRef, class BlockT, class RegionT>
class RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT> {
  using BlockTraits = GraphTraits<BlockT *>;
  using SuccIterTy = typename BlockTraits::ChildIteratorType;
193 "Subregion node not allowed in flat iterating mode!");
194 assert(
Node->getParent() &&
"A BB node must have a parent!");
197 while (BlockTraits::child_end(
Node->getEntry()) != Itor &&
198 Node->getParent()->getExit() == *Itor)
206 "Subregion node not allowed in flat iterating mode!");
  bool operator==(const Self &x) const {
    assert(Node->getParent() == x.Node->getParent() &&
           "Cannot compare iterators of different regions!");
    return Itor == x.Itor && Node == x.Node;
  }
  value_type operator*() const {
    BlockT *BB = *Itor;
    // Get the iterating region.
    RegionT *Parent = Node->getParent();
    // A successor can only leave the region through its exit block.
    assert(Parent->getExit() != BB && "iterator out of range!");
    return Parent->getBBNode(BB);
  }
    // In operator++: advance and keep skipping the region's exit block.
    do
      ++Itor;
    while (Itor != BlockTraits::child_end(Node->getEntry()) &&
           Node->getParent()->getExit() == *Itor);
template <class NodeRef, class BlockT, class RegionT>
inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_begin(NodeRef Node) {
  return RNSuccIterator<NodeRef, BlockT, RegionT>(Node);
}

template <class NodeRef, class BlockT, class RegionT>
inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_end(NodeRef Node) {
  return RNSuccIterator<NodeRef, BlockT, RegionT>(Node, true);
}
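Only NodeRef appears in the parameter list, so a caller has to spell out all
three template arguments. A hedged sketch; countSuccessors is a hypothetical
helper:

#include <iterator>

// Count the successors of a RegionNode using the helpers above.
unsigned countSuccessors(llvm::RegionNode *RN) {
  auto I =
      llvm::succ_begin<llvm::RegionNode *, llvm::BasicBlock, llvm::Region>(RN);
  auto E =
      llvm::succ_end<llvm::RegionNode *, llvm::BasicBlock, llvm::Region>(RN);
  return static_cast<unsigned>(std::distance(I, E));
}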
#define RegionNodeGraphTraits(NodeT, BlockT, RegionT)                          \
  template <> struct GraphTraits<NodeT *> {                                    \
    using NodeRef = NodeT *;                                                   \
    using ChildIteratorType = RNSuccIterator<NodeRef, BlockT, RegionT>;        \
    static NodeRef getEntryNode(NodeRef N) { return N; }                       \
    static inline ChildIteratorType child_begin(NodeRef N) {                   \
      return RNSuccIterator<NodeRef, BlockT, RegionT>(N);                      \
    }                                                                          \
    static inline ChildIteratorType child_end(NodeRef N) {                     \
      return RNSuccIterator<NodeRef, BlockT, RegionT>(N, true);                \
    }                                                                          \
  };                                                                           \
  template <> struct GraphTraits<FlatIt<NodeT *>> {                            \
    using NodeRef = NodeT *;                                                   \
    using ChildIteratorType =                                                  \
        RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>;                      \
    static NodeRef getEntryNode(NodeRef N) { return N; }                       \
    static inline ChildIteratorType child_begin(NodeRef N) {                   \
      return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N);              \
    }                                                                          \
    static inline ChildIteratorType child_end(NodeRef N) {                     \
      return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N, true);        \
    }                                                                          \
  }
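Once the macro has been applied for a concrete node type, generic graph code
reaches the same successors through GraphTraits. A sketch, assuming the
RegionNode/BasicBlock/Region instantiation that the header performs in its
elided tail (the member index lists RegionNodeGraphTraits(RegionNode,
BasicBlock, Region)); walkChildren is a hypothetical helper:

#include "llvm/ADT/GraphTraits.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"

// Walk the children (successors) of a RegionNode via the GraphTraits facade.
void walkChildren(llvm::RegionNode *N) {
  using GT = llvm::GraphTraits<llvm::RegionNode *>;
  for (auto I = GT::child_begin(N), E = GT::child_end(N); I != E; ++I)
    (void)*I; // Each child is a successor RegionNode.
}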
#define RegionGraphTraits(RegionT, NodeT)                                      \
  template <> struct GraphTraits<RegionT *> : public GraphTraits<NodeT *> {    \
    using nodes_iterator = df_iterator<NodeRef>;                               \
    static NodeRef getEntryNode(RegionT *R) {                                  \
      return R->getNode(R->getEntry());                                        \
    }                                                                          \
    static nodes_iterator nodes_begin(RegionT *R) {                            \
      return nodes_iterator::begin(getEntryNode(R));                           \
    }                                                                          \
    static nodes_iterator nodes_end(RegionT *R) {                              \
      return nodes_iterator::end(getEntryNode(R));                             \
    }                                                                          \
  };                                                                           \
  template <>                                                                  \
  struct GraphTraits<FlatIt<RegionT *>>                                        \
      : public GraphTraits<FlatIt<NodeT *>> {                                  \
    using nodes_iterator =                                                     \
        df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,          \
                    GraphTraits<FlatIt<NodeRef>>>;                             \
    static NodeRef getEntryNode(RegionT *R) {                                  \
      return R->getBBNode(R->getEntry());                                      \
    }                                                                          \
    static nodes_iterator nodes_begin(RegionT *R) {                            \
      return nodes_iterator::begin(getEntryNode(R));                           \
    }                                                                          \
    static nodes_iterator nodes_end(RegionT *R) {                              \
      return nodes_iterator::end(getEntryNode(R));                             \
    }                                                                          \
  }
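With RegionGraphTraits applied (the member index lists
RegionGraphTraits(Region, RegionNode) in the elided tail of the file), a whole
Region is a graph whose entry node is its entry RegionNode, so the generic
depth-first utilities apply. A hedged sketch; visitRegionNodes and visitFlat
are hypothetical helpers:

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"

// Visit every RegionNode of R in depth-first order (hierarchical mode:
// each subregion appears as a single node).
void visitRegionNodes(llvm::Region *R) {
  for (llvm::RegionNode *RN : llvm::depth_first(R))
    (void)RN;
}

// Flat mode: only basic-block nodes, reached via GraphTraits<FlatIt<...>>.
void visitFlat(llvm::Region *R) {
  using FGT = llvm::GraphTraits<llvm::FlatIt<llvm::Region *>>;
  for (auto I = FGT::nodes_begin(R), E = FGT::nodes_end(R); I != E; ++I)
    (void)*I;
}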
#endif // LLVM_ANALYSIS_REGIONITERATOR_H