Bug Summary

File: mlir/include/mlir/IR/Value.h
Warning: line 117, column 33
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name MemRefOps.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D MLIR_CUDA_CONVERSIONS_ENABLED=1 -D MLIR_ROCM_CONVERSIONS_ENABLED=1 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I tools/mlir/lib/Dialect/MemRef/IR -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/mlir/lib/Dialect/MemRef/IR -I include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/mlir/include -I tools/mlir/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-19-134126-35450-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp

1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10#include "mlir/Dialect/MemRef/IR/MemRef.h"
11#include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
12#include "mlir/Dialect/StandardOps/IR/Ops.h"
13#include "mlir/Dialect/StandardOps/Utils/Utils.h"
14#include "mlir/Dialect/Utils/StaticValueUtils.h"
15#include "mlir/IR/AffineMap.h"
16#include "mlir/IR/Builders.h"
17#include "mlir/IR/BuiltinTypes.h"
18#include "mlir/IR/Matchers.h"
19#include "mlir/IR/PatternMatch.h"
20#include "mlir/IR/TypeUtilities.h"
21#include "mlir/Interfaces/InferTypeOpInterface.h"
22#include "mlir/Interfaces/ViewLikeInterface.h"
23#include "llvm/ADT/STLExtras.h"
24
25using namespace mlir;
26using namespace mlir::memref;
27
28/// Materialize a single constant operation from a given attribute value with
29/// the desired resultant type.
30Operation *MemRefDialect::materializeConstant(OpBuilder &builder,
31 Attribute value, Type type,
32 Location loc) {
33 if (arith::ConstantOp::isBuildableWith(value, type))
34 return builder.create<arith::ConstantOp>(loc, value, type);
35 if (ConstantOp::isBuildableWith(value, type))
36 return builder.create<ConstantOp>(loc, value, type);
37 return nullptr;
38}
39
40//===----------------------------------------------------------------------===//
41// Common canonicalization pattern support logic
42//===----------------------------------------------------------------------===//
43
44/// This is a common class used for patterns of the form
45/// "someop(memrefcast) -> someop". It folds the source of any memref.cast
46/// into the root operation directly.
47LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) {
48 bool folded = false;
49 for (OpOperand &operand : op->getOpOperands()) {
50 auto cast = operand.get().getDefiningOp<CastOp>();
51 if (cast && operand.get() != inner &&
52 !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
53 operand.set(cast.getOperand());
54 folded = true;
55 }
56 }
57 return success(folded);
58}
59
60/// Return an unranked/ranked tensor type for the given unranked/ranked memref
61/// type.
62Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
63 if (auto memref = type.dyn_cast<MemRefType>())
64 return RankedTensorType::get(memref.getShape(), memref.getElementType());
65 if (auto memref = type.dyn_cast<UnrankedMemRefType>())
66 return UnrankedTensorType::get(memref.getElementType());
67 return NoneType::get(type.getContext());
68}
69
70//===----------------------------------------------------------------------===//
71// AllocOp / AllocaOp
72//===----------------------------------------------------------------------===//
73
74template <typename AllocLikeOp>
75static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
76 static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
77 "applies to only alloc or alloca");
78 auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
79 if (!memRefType)
80 return op.emitOpError("result must be a memref");
81
82 if (static_cast<int64_t>(op.dynamicSizes().size()) !=
83 memRefType.getNumDynamicDims())
84 return op.emitOpError("dimension operand count does not equal memref "
85 "dynamic dimension count");
86
87 unsigned numSymbols = 0;
88 if (!memRefType.getLayout().isIdentity())
89 numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols();
90 if (op.symbolOperands().size() != numSymbols)
91 return op.emitOpError("symbol operand count does not equal memref symbol "
92 "count: expected ")
93 << numSymbols << ", got " << op.symbolOperands().size();
94
95 return success();
96}
97
98static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); }
99
100static LogicalResult verify(AllocaOp op) {
101 // An alloca op needs to have an ancestor with an allocation scope trait.
102 if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
103 return op.emitOpError(
104 "requires an ancestor op with AutomaticAllocationScope trait");
105
106 return verifyAllocLikeOp(op);
107}
108
109namespace {
110/// Fold constant dimensions into an alloc like operation.
111template <typename AllocLikeOp>
112struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
113 using OpRewritePattern<AllocLikeOp>::OpRewritePattern;
114
115 LogicalResult matchAndRewrite(AllocLikeOp alloc,
116 PatternRewriter &rewriter) const override {
117 // Check to see if any dimensions operands are constants. If so, we can
118 // substitute and drop them.
119 if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) {
120 return matchPattern(operand, matchConstantIndex());
121 }))
122 return failure();
123
124 auto memrefType = alloc.getType();
125
126 // Ok, we have one or more constant operands. Collect the non-constant ones
127 // and keep track of the resultant memref type to build.
128 SmallVector<int64_t, 4> newShapeConstants;
129 newShapeConstants.reserve(memrefType.getRank());
130 SmallVector<Value, 4> dynamicSizes;
131
132 unsigned dynamicDimPos = 0;
133 for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
134 int64_t dimSize = memrefType.getDimSize(dim);
135 // If this is already static dimension, keep it.
136 if (dimSize != -1) {
137 newShapeConstants.push_back(dimSize);
138 continue;
139 }
140 auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos];
141 auto *defOp = dynamicSize.getDefiningOp();
142 if (auto constantIndexOp =
143 dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
144 // Dynamic shape dimension will be folded.
145 newShapeConstants.push_back(constantIndexOp.value());
146 } else {
147 // Dynamic shape dimension not folded; copy dynamicSize from old memref.
148 newShapeConstants.push_back(-1);
149 dynamicSizes.push_back(dynamicSize);
150 }
151 dynamicDimPos++;
152 }
153
154 // Create new memref type (which will have fewer dynamic dimensions).
155 MemRefType newMemRefType =
156 MemRefType::Builder(memrefType).setShape(newShapeConstants);
157 assert(static_cast<int64_t>(dynamicSizes.size()) ==(static_cast <bool> (static_cast<int64_t>(dynamicSizes
.size()) == newMemRefType.getNumDynamicDims()) ? void (0) : __assert_fail
("static_cast<int64_t>(dynamicSizes.size()) == newMemRefType.getNumDynamicDims()"
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 158, __extension__
__PRETTY_FUNCTION__))
158 newMemRefType.getNumDynamicDims())(static_cast <bool> (static_cast<int64_t>(dynamicSizes
.size()) == newMemRefType.getNumDynamicDims()) ? void (0) : __assert_fail
("static_cast<int64_t>(dynamicSizes.size()) == newMemRefType.getNumDynamicDims()"
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 158, __extension__
__PRETTY_FUNCTION__))
;
159
160 // Create and insert the alloc op for the new memref.
161 auto newAlloc = rewriter.create<AllocLikeOp>(
162 alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(),
163 alloc.alignmentAttr());
164 // Insert a cast so we have the same type as the old alloc.
165 auto resultCast =
166 rewriter.create<CastOp>(alloc.getLoc(), newAlloc, alloc.getType());
167
168 rewriter.replaceOp(alloc, {resultCast});
169 return success();
170 }
171};
172
173/// Fold alloc operations with no users or only store and dealloc uses.
174template <typename T>
175struct SimplifyDeadAlloc : public OpRewritePattern<T> {
176 using OpRewritePattern<T>::OpRewritePattern;
177
178 LogicalResult matchAndRewrite(T alloc,
179 PatternRewriter &rewriter) const override {
180 if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
181 if (auto storeOp = dyn_cast<StoreOp>(op))
182 return storeOp.value() == alloc;
183 return !isa<DeallocOp>(op);
184 }))
185 return failure();
186
187 for (Operation *user : llvm::make_early_inc_range(alloc->getUsers()))
188 rewriter.eraseOp(user);
189
190 rewriter.eraseOp(alloc);
191 return success();
192 }
193};
194} // namespace
195
196void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
197 MLIRContext *context) {
198 results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context);
199}
200
201void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
202 MLIRContext *context) {
203 results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>(
204 context);
205}
206
207//===----------------------------------------------------------------------===//
208// AllocaScopeOp
209//===----------------------------------------------------------------------===//
210
211static void print(OpAsmPrinter &p, AllocaScopeOp &op) {
212 bool printBlockTerminators = false;
213
214 p << ' ';
215 if (!op.results().empty()) {
216 p << " -> (" << op.getResultTypes() << ")";
217 printBlockTerminators = true;
218 }
219 p << ' ';
220 p.printRegion(op.bodyRegion(),
221 /*printEntryBlockArgs=*/false,
222 /*printBlockTerminators=*/printBlockTerminators);
223 p.printOptionalAttrDict(op->getAttrs());
224}
225
226static ParseResult parseAllocaScopeOp(OpAsmParser &parser,
227 OperationState &result) {
228 // Create a region for the body.
229 result.regions.reserve(1);
230 Region *bodyRegion = result.addRegion();
231
232 // Parse optional results type list.
233 if (parser.parseOptionalArrowTypeList(result.types))
234 return failure();
235
236 // Parse the body region.
237 if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{}))
238 return failure();
239 AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(),
240 result.location);
241
242 // Parse the optional attribute list.
243 if (parser.parseOptionalAttrDict(result.attributes))
244 return failure();
245
246 return success();
247}
248
249static LogicalResult verify(AllocaScopeOp op) {
250 if (failed(RegionBranchOpInterface::verifyTypes(op)))
251 return failure();
252
253 return success();
254}
255
256void AllocaScopeOp::getSuccessorRegions(
257 Optional<unsigned> index, ArrayRef<Attribute> operands,
258 SmallVectorImpl<RegionSuccessor> &regions) {
259 if (index.hasValue()) {
260 regions.push_back(RegionSuccessor(getResults()));
261 return;
262 }
263
264 regions.push_back(RegionSuccessor(&bodyRegion()));
265}
266
267//===----------------------------------------------------------------------===//
268// AssumeAlignmentOp
269//===----------------------------------------------------------------------===//
270
271static LogicalResult verify(AssumeAlignmentOp op) {
272 unsigned alignment = op.alignment();
273 if (!llvm::isPowerOf2_32(alignment))
274 return op.emitOpError("alignment must be power of 2");
275 return success();
276}
277
278//===----------------------------------------------------------------------===//
279// CastOp
280//===----------------------------------------------------------------------===//
281
282/// Determines whether MemRef_CastOp casts to a more dynamic version of the
283/// source memref. This is useful to to fold a memref.cast into a consuming op
284/// and implement canonicalization patterns for ops in different dialects that
285/// may consume the results of memref.cast operations. Such foldable memref.cast
286/// operations are typically inserted as `view` and `subview` ops are
287/// canonicalized, to preserve the type compatibility of their uses.
288///
289/// Returns true when all conditions are met:
290/// 1. source and result are ranked memrefs with strided semantics and same
291/// element type and rank.
292/// 2. each of the source's size, offset or stride has more static information
293/// than the corresponding result's size, offset or stride.
294///
295/// Example 1:
296/// ```mlir
297/// %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
298/// %2 = consumer %1 ... : memref<?x?xf32> ...
299/// ```
300///
301/// may fold into:
302///
303/// ```mlir
304/// %2 = consumer %0 ... : memref<8x16xf32> ...
305/// ```
306///
307/// Example 2:
308/// ```
309/// %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
310/// to memref<?x?xf32>
311/// consumer %1 : memref<?x?xf32> ...
312/// ```
313///
314/// may fold into:
315///
316/// ```
317/// consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
318/// ```
319bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
320 MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
321 MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();
322
323 // Requires ranked MemRefType.
324 if (!sourceType || !resultType)
325 return false;
326
327 // Requires same elemental type.
328 if (sourceType.getElementType() != resultType.getElementType())
329 return false;
330
331 // Requires same rank.
332 if (sourceType.getRank() != resultType.getRank())
333 return false;
334
335 // Only fold casts between strided memref forms.
336 int64_t sourceOffset, resultOffset;
337 SmallVector<int64_t, 4> sourceStrides, resultStrides;
338 if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
339 failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
340 return false;
341
342 // If cast is towards more static sizes along any dimension, don't fold.
343 for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
344 auto ss = std::get<0>(it), st = std::get<1>(it);
345 if (ss != st)
346 if (ShapedType::isDynamic(ss) && !ShapedType::isDynamic(st))
347 return false;
348 }
349
350 // If cast is towards more static offset along any dimension, don't fold.
351 if (sourceOffset != resultOffset)
352 if (ShapedType::isDynamicStrideOrOffset(sourceOffset) &&
353 !ShapedType::isDynamicStrideOrOffset(resultOffset))
354 return false;
355
356 // If cast is towards more static strides along any dimension, don't fold.
357 for (auto it : llvm::zip(sourceStrides, resultStrides)) {
358 auto ss = std::get<0>(it), st = std::get<1>(it);
359 if (ss != st)
360 if (ShapedType::isDynamicStrideOrOffset(ss) &&
361 !ShapedType::isDynamicStrideOrOffset(st))
362 return false;
363 }
364
365 return true;
366}
367
/// Returns true if a memref.cast between `inputs` and `outputs` is legal.
/// Exactly one input and one output type are expected. Ranked-to-ranked casts
/// require matching element type, memory space, rank, and compatible (static
/// or dynamic) sizes/strides/offset; ranked<->unranked casts require matching
/// element type and memory space; unranked-to-unranked is rejected.
bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
  if (inputs.size() != 1 || outputs.size() != 1)
    return false;
  Type a = inputs.front(), b = outputs.front();
  // Classify both sides as ranked (aT/bT) or unranked (uaT/ubT) memrefs.
  auto aT = a.dyn_cast<MemRefType>();
  auto bT = b.dyn_cast<MemRefType>();

  auto uaT = a.dyn_cast<UnrankedMemRefType>();
  auto ubT = b.dyn_cast<UnrankedMemRefType>();

  if (aT && bT) {
    // Ranked-to-ranked cast.
    if (aT.getElementType() != bT.getElementType())
      return false;
    // Differing layouts are only castable if both are strided and every
    // stride/offset pair is compatible.
    if (aT.getLayout() != bT.getLayout()) {
      int64_t aOffset, bOffset;
      SmallVector<int64_t, 4> aStrides, bStrides;
      if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
          failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
          aStrides.size() != bStrides.size())
        return false;

      // Strides along a dimension/offset are compatible if the value in the
      // source memref is static and the value in the target memref is the
      // same. They are also compatible if either one is dynamic (see
      // description of MemRefCastOp for details).
      auto checkCompatible = [](int64_t a, int64_t b) {
        return (a == MemRefType::getDynamicStrideOrOffset() ||
                b == MemRefType::getDynamicStrideOrOffset() || a == b);
      };
      if (!checkCompatible(aOffset, bOffset))
        return false;
      for (const auto &aStride : enumerate(aStrides))
        if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
          return false;
    }
    if (aT.getMemorySpace() != bT.getMemorySpace())
      return false;

    // They must have the same rank, and any specified dimensions must match.
    if (aT.getRank() != bT.getRank())
      return false;

    for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
      int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
      // -1 encodes a dynamic dimension; only two static, differing sizes
      // make the cast illegal.
      if (aDim != -1 && bDim != -1 && aDim != bDim)
        return false;
    }
    return true;
  } else {
    // At least one side is unranked (or not a memref at all).
    if (!aT && !uaT)
      return false;
    if (!bT && !ubT)
      return false;
    // Unranked to unranked casting is unsupported
    if (uaT && ubT)
      return false;

    auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
    auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
    if (aEltType != bEltType)
      return false;

    auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
    auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
    return aMemSpace == bMemSpace;
  }

  // Unreachable: both branches above return.
  return false;
}
437
438OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
439 return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
440}
441
442//===----------------------------------------------------------------------===//
443// CopyOp
444//===----------------------------------------------------------------------===//
445
446namespace {
447/// If the source/target of a CopyOp is a CastOp that does not modify the shape
448/// and element type, the cast can be skipped. Such CastOps only cast the layout
449/// of the type.
450struct FoldCopyOfCast : public OpRewritePattern<CopyOp> {
451 using OpRewritePattern<CopyOp>::OpRewritePattern;
452
453 LogicalResult matchAndRewrite(CopyOp copyOp,
454 PatternRewriter &rewriter) const override {
455 bool modified = false;
456
457 // Check source.
458 if (auto castOp = copyOp.source().getDefiningOp<CastOp>()) {
459 auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
460 auto toType = castOp.source().getType().dyn_cast<MemRefType>();
461
462 if (fromType && toType) {
463 if (fromType.getShape() == toType.getShape() &&
464 fromType.getElementType() == toType.getElementType()) {
465 rewriter.updateRootInPlace(
466 copyOp, [&] { copyOp.sourceMutable().assign(castOp.source()); });
467 modified = true;
468 }
469 }
470 }
471
472 // Check target.
473 if (auto castOp = copyOp.target().getDefiningOp<CastOp>()) {
474 auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
475 auto toType = castOp.source().getType().dyn_cast<MemRefType>();
476
477 if (fromType && toType) {
478 if (fromType.getShape() == toType.getShape() &&
479 fromType.getElementType() == toType.getElementType()) {
480 rewriter.updateRootInPlace(
481 copyOp, [&] { copyOp.targetMutable().assign(castOp.source()); });
482 modified = true;
483 }
484 }
485 }
486
487 return success(modified);
488 }
489};
490
491/// Fold memref.copy(%x, %x).
492struct FoldSelfCopy : public OpRewritePattern<CopyOp> {
493 using OpRewritePattern<CopyOp>::OpRewritePattern;
494
495 LogicalResult matchAndRewrite(CopyOp copyOp,
496 PatternRewriter &rewriter) const override {
497 if (copyOp.source() != copyOp.target())
498 return failure();
499
500 rewriter.eraseOp(copyOp);
501 return success();
502 }
503};
504} // namespace
505
506void CopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
507 MLIRContext *context) {
508 results.add<FoldCopyOfCast, FoldSelfCopy>(context);
509}
510
511//===----------------------------------------------------------------------===//
512// DeallocOp
513//===----------------------------------------------------------------------===//
514
515LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
516 SmallVectorImpl<OpFoldResult> &results) {
517 /// dealloc(memrefcast) -> dealloc
518 return foldMemRefCast(*this);
519}
520
521//===----------------------------------------------------------------------===//
522// DimOp
523//===----------------------------------------------------------------------===//
524
525void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
526 int64_t index) {
527 auto loc = result.location;
528 Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
529 build(builder, result, source, indexValue);
530}
531
532void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
533 Value index) {
534 auto indexTy = builder.getIndexType();
535 build(builder, result, indexTy, source, index);
536}
537
538Optional<int64_t> DimOp::getConstantIndex() {
539 if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
540 return constantOp.getValue().cast<IntegerAttr>().getInt();
541 return {};
542}
543
544static LogicalResult verify(DimOp op) {
545 // Assume unknown index to be in range.
546 Optional<int64_t> index = op.getConstantIndex();
547 if (!index.hasValue())
548 return success();
549
550 // Check that constant index is not knowingly out of range.
551 auto type = op.source().getType();
552 if (auto memrefType = type.dyn_cast<MemRefType>()) {
553 if (index.getValue() >= memrefType.getRank())
554 return op.emitOpError("index is out of range");
555 } else if (type.isa<UnrankedMemRefType>()) {
556 // Assume index to be in range.
557 } else {
558 llvm_unreachable("expected operand with memref type")::llvm::llvm_unreachable_internal("expected operand with memref type"
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 558)
;
559 }
560 return success();
561}
562
563/// Return a map with key being elements in `vals` and data being number of
564/// occurences of it. Use std::map, since the `vals` here are strides and the
565/// dynamic stride value is the same as the tombstone value for
566/// `DenseMap<int64_t>`.
567static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
568 std::map<int64_t, unsigned> numOccurences;
569 for (auto val : vals)
570 numOccurences[val]++;
571 return numOccurences;
572}
573
/// Given the `originalType` and a `candidateReducedType` whose shape is assumed
/// to be a subset of `originalType` with some `1` entries erased, return the
/// set of indices that specifies which of the entries of `originalShape` are
/// dropped to obtain `reducedShape`.
/// This accounts for cases where there are multiple unit-dims, but only a
/// subset of those are dropped. For MemRefTypes these can be disambiguated
/// using the strides. If a dimension is dropped the stride must be dropped too.
/// Returns llvm::None when either type is not strided or the candidate shape
/// cannot be reconciled with the original.
static llvm::Optional<llvm::SmallDenseSet<unsigned>>
computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
                               ArrayRef<OpFoldResult> sizes) {
  llvm::SmallDenseSet<unsigned> unusedDims;
  // Same rank: nothing was dropped.
  if (originalType.getRank() == reducedType.getRank())
    return unusedDims;

  // Candidate dropped dims: every dimension whose static size is 1.
  for (const auto &dim : llvm::enumerate(sizes))
    if (auto attr = dim.value().dyn_cast<Attribute>())
      if (attr.cast<IntegerAttr>().getInt() == 1)
        unusedDims.insert(dim.index());

  // Both types must have strided (affine) form to disambiguate further.
  SmallVector<int64_t> originalStrides, candidateStrides;
  int64_t originalOffset, candidateOffset;
  if (failed(
          getStridesAndOffset(originalType, originalStrides, originalOffset)) ||
      failed(
          getStridesAndOffset(reducedType, candidateStrides, candidateOffset)))
    return llvm::None;

  // For memrefs, a dimension is truly dropped if its corresponding stride is
  // also dropped. This is particularly important when more than one of the dims
  // is 1. Track the number of occurences of the strides in the original type
  // and the candidate type. For each unused dim that stride should not be
  // present in the candidate type. Note that there could be multiple dimensions
  // that have the same size. We dont need to exactly figure out which dim
  // corresponds to which stride, we just need to verify that the number of
  // reptitions of a stride in the original + number of unused dims with that
  // stride == number of repititions of a stride in the candidate.
  std::map<int64_t, unsigned> currUnaccountedStrides =
      getNumOccurences(originalStrides);
  std::map<int64_t, unsigned> candidateStridesNumOccurences =
      getNumOccurences(candidateStrides);
  llvm::SmallDenseSet<unsigned> prunedUnusedDims;
  for (unsigned dim : unusedDims) {
    int64_t originalStride = originalStrides[dim];
    if (currUnaccountedStrides[originalStride] >
        candidateStridesNumOccurences[originalStride]) {
      // This dim can be treated as dropped.
      currUnaccountedStrides[originalStride]--;
      continue;
    }
    if (currUnaccountedStrides[originalStride] ==
        candidateStridesNumOccurences[originalStride]) {
      // The stride for this is not dropped. Keep as is.
      prunedUnusedDims.insert(dim);
      continue;
    }
    if (currUnaccountedStrides[originalStride] <
        candidateStridesNumOccurences[originalStride]) {
      // This should never happen. Cant have a stride in the reduced rank type
      // that wasnt in the original one.
      return llvm::None;
    }
  }

  // Remove dims whose strides survived: they were not actually dropped.
  for (auto prunedDim : prunedUnusedDims)
    unusedDims.erase(prunedDim);
  // Sanity check: dropped dims must exactly account for the rank difference.
  if (unusedDims.size() + reducedType.getRank() != originalType.getRank())
    return llvm::None;
  return unusedDims;
}
643
644llvm::SmallDenseSet<unsigned> SubViewOp::getDroppedDims() {
645 MemRefType sourceType = getSourceType();
646 MemRefType resultType = getType();
647 llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
648 computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
649 assert(unusedDims && "unable to find unused dims of subview")(static_cast <bool> (unusedDims && "unable to find unused dims of subview"
) ? void (0) : __assert_fail ("unusedDims && \"unable to find unused dims of subview\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 649, __extension__
__PRETTY_FUNCTION__))
;
650 return *unusedDims;
651}
652
653OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
654 // All forms of folding require a known index.
655 auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
656 if (!index)
657 return {};
658
659 // Folding for unranked types (UnrankedMemRefType) is not supported.
660 auto memrefType = source().getType().dyn_cast<MemRefType>();
661 if (!memrefType)
662 return {};
663
664 // Fold if the shape extent along the given index is known.
665 if (!memrefType.isDynamicDim(index.getInt())) {
666 Builder builder(getContext());
667 return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
668 }
669
670 // The size at the given index is now known to be a dynamic size.
671 unsigned unsignedIndex = index.getValue().getZExtValue();
672
673 // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
674 Operation *definingOp = source().getDefiningOp();
675
676 if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
677 return *(alloc.getDynamicSizes().begin() +
678 memrefType.getDynamicDimIndex(unsignedIndex));
679
680 if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp))
681 return *(alloca.getDynamicSizes().begin() +
682 memrefType.getDynamicDimIndex(unsignedIndex));
683
684 if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
685 return *(view.getDynamicSizes().begin() +
686 memrefType.getDynamicDimIndex(unsignedIndex));
687
688 if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) {
689 llvm::SmallDenseSet<unsigned> unusedDims = subview.getDroppedDims();
690 unsigned resultIndex = 0;
691 unsigned sourceRank = subview.getSourceType().getRank();
692 unsigned sourceIndex = 0;
693 for (auto i : llvm::seq<unsigned>(0, sourceRank)) {
694 if (unusedDims.count(i))
695 continue;
696 if (resultIndex == unsignedIndex) {
697 sourceIndex = i;
698 break;
699 }
700 resultIndex++;
701 }
702 assert(subview.isDynamicSize(sourceIndex) &&(static_cast <bool> (subview.isDynamicSize(sourceIndex)
&& "expected dynamic subview size") ? void (0) : __assert_fail
("subview.isDynamicSize(sourceIndex) && \"expected dynamic subview size\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 703, __extension__
__PRETTY_FUNCTION__))
703 "expected dynamic subview size")(static_cast <bool> (subview.isDynamicSize(sourceIndex)
&& "expected dynamic subview size") ? void (0) : __assert_fail
("subview.isDynamicSize(sourceIndex) && \"expected dynamic subview size\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 703, __extension__
__PRETTY_FUNCTION__))
;
704 return subview.getDynamicSize(sourceIndex);
705 }
706
707 if (auto sizeInterface =
708 dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
709 assert(sizeInterface.isDynamicSize(unsignedIndex) &&(static_cast <bool> (sizeInterface.isDynamicSize(unsignedIndex
) && "Expected dynamic subview size") ? void (0) : __assert_fail
("sizeInterface.isDynamicSize(unsignedIndex) && \"Expected dynamic subview size\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 710, __extension__
__PRETTY_FUNCTION__))
710 "Expected dynamic subview size")(static_cast <bool> (sizeInterface.isDynamicSize(unsignedIndex
) && "Expected dynamic subview size") ? void (0) : __assert_fail
("sizeInterface.isDynamicSize(unsignedIndex) && \"Expected dynamic subview size\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 710, __extension__
__PRETTY_FUNCTION__))
;
711 return sizeInterface.getDynamicSize(unsignedIndex);
712 }
713
714 // dim(memrefcast) -> dim
715 if (succeeded(foldMemRefCast(*this)))
716 return getResult();
717
718 return {};
719}
720
721namespace {
722/// Fold dim of a memref reshape operation to a load into the reshape's shape
723/// operand.
724struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
725 using OpRewritePattern<DimOp>::OpRewritePattern;
726
727 LogicalResult matchAndRewrite(DimOp dim,
728 PatternRewriter &rewriter) const override {
729 auto reshape = dim.source().getDefiningOp<ReshapeOp>();
730
731 if (!reshape)
732 return failure();
733
734 // Place the load directly after the reshape to ensure that the shape memref
735 // was not mutated.
736 rewriter.setInsertionPointAfter(reshape);
737 Location loc = dim.getLoc();
738 Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index());
739 if (load.getType() != dim.getType())
740 load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load);
741 rewriter.replaceOp(dim, load);
742 return success();
743 }
744};
745
746} // namespace
747
void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  // dim(reshape(%mem, %shape), %idx) -> load %shape[%idx]
  results.add<DimOfMemRefReshape>(context);
}
752
753// ---------------------------------------------------------------------------
754// DmaStartOp
755// ---------------------------------------------------------------------------
756
757void DmaStartOp::build(OpBuilder &builder, OperationState &result,
758 Value srcMemRef, ValueRange srcIndices, Value destMemRef,
759 ValueRange destIndices, Value numElements,
760 Value tagMemRef, ValueRange tagIndices, Value stride,
761 Value elementsPerStride) {
762 result.addOperands(srcMemRef);
763 result.addOperands(srcIndices);
764 result.addOperands(destMemRef);
765 result.addOperands(destIndices);
766 result.addOperands({numElements, tagMemRef});
767 result.addOperands(tagIndices);
768 if (stride)
769 result.addOperands({stride, elementsPerStride});
770}
771
772static void print(OpAsmPrinter &p, DmaStartOp op) {
773 p << " " << op.getSrcMemRef() << '[' << op.getSrcIndices() << "], "
774 << op.getDstMemRef() << '[' << op.getDstIndices() << "], "
775 << op.getNumElements() << ", " << op.getTagMemRef() << '['
776 << op.getTagIndices() << ']';
777 if (op.isStrided())
778 p << ", " << op.getStride() << ", " << op.getNumElementsPerStride();
779
780 p.printOptionalAttrDict(op->getAttrs());
781 p << " : " << op.getSrcMemRef().getType() << ", "
782 << op.getDstMemRef().getType() << ", " << op.getTagMemRef().getType();
783}
784
785// Parse DmaStartOp.
786// Ex:
787// %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size,
788// %tag[%index], %stride, %num_elt_per_stride :
789// : memref<3076 x f32, 0>,
790// memref<1024 x f32, 2>,
791// memref<1 x i32>
792//
static ParseResult parseDmaStartOp(OpAsmParser &parser,
                                   OperationState &result) {
  OpAsmParser::OperandType srcMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
  OpAsmParser::OperandType dstMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
  OpAsmParser::OperandType numElementsInfo;
  OpAsmParser::OperandType tagMemrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
  SmallVector<OpAsmParser::OperandType, 2> strideInfo;

  SmallVector<Type, 3> types;
  auto indexType = parser.getBuilder().getIndexType();

  // Parse and resolve the following list of operands:
  // *) source memref followed by its indices (in square brackets).
  // *) destination memref followed by its indices (in square brackets).
  // *) dma size in KiB.
  if (parser.parseOperand(srcMemRefInfo) ||
      parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
      parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(numElementsInfo) ||
      parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
      parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
    return failure();

  // Parse optional stride and elements per stride.
  if (parser.parseTrailingOperandList(strideInfo))
    return failure();

  // Either both or none of the stride-related operands must be present.
  bool isStrided = strideInfo.size() == 2;
  if (!strideInfo.empty() && !isStrided) {
    return parser.emitError(parser.getNameLoc(),
                            "expected two stride related operands");
  }

  // Exactly three types are expected: source, destination and tag memref.
  if (parser.parseColonTypeList(types))
    return failure();
  if (types.size() != 3)
    return parser.emitError(parser.getNameLoc(), "fewer/more types expected");

  // Resolution order must match the operand order built by DmaStartOp::build.
  if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
      parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
      parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
      parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
      // size should be an index.
      parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
      parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
      // tag indices should be index.
      parser.resolveOperands(tagIndexInfos, indexType, result.operands))
    return failure();

  if (isStrided) {
    if (parser.resolveOperands(strideInfo, indexType, result.operands))
      return failure();
  }

  return success();
}
853
854static LogicalResult verify(DmaStartOp op) {
855 unsigned numOperands = op.getNumOperands();
856
857 // Mandatory non-variadic operands are: src memref, dst memref, tag memref and
858 // the number of elements.
859 if (numOperands < 4)
1
Assuming 'numOperands' is >= 4
2
Taking false branch
860 return op.emitOpError("expected at least 4 operands");
861
862 // Check types of operands. The order of these calls is important: the later
863 // calls rely on some type properties to compute the operand position.
864 // 1. Source memref.
865 if (!op.getSrcMemRef().getType().isa<MemRefType>())
3
Taking false branch
866 return op.emitOpError("expected source to be of memref type");
867 if (numOperands < op.getSrcMemRefRank() + 4)
4
Assuming the condition is false
868 return op.emitOpError()
869 << "expected at least " << op.getSrcMemRefRank() + 4 << " operands";
870 if (!op.getSrcIndices().empty() &&
5
Assuming the condition is false
6
Taking false branch
871 !llvm::all_of(op.getSrcIndices().getTypes(),
872 [](Type t) { return t.isIndex(); }))
873 return op.emitOpError("expected source indices to be of index type");
874
875 // 2. Destination memref.
876 if (!op.getDstMemRef().getType().isa<MemRefType>())
7
Taking false branch
877 return op.emitOpError("expected destination to be of memref type");
878 unsigned numExpectedOperands =
879 op.getSrcMemRefRank() + op.getDstMemRefRank() + 4;
880 if (numOperands < numExpectedOperands)
8
Assuming 'numOperands' is >= 'numExpectedOperands'
881 return op.emitOpError()
882 << "expected at least " << numExpectedOperands << " operands";
883 if (!op.getDstIndices().empty() &&
9
Assuming the condition is false
10
Taking false branch
884 !llvm::all_of(op.getDstIndices().getTypes(),
885 [](Type t) { return t.isIndex(); }))
886 return op.emitOpError("expected destination indices to be of index type");
887
888 // 3. Number of elements.
889 if (!op.getNumElements().getType().isIndex())
11
Assuming the condition is false
12
Taking false branch
890 return op.emitOpError("expected num elements to be of index type");
891
892 // 4. Tag memref.
893 if (!op.getTagMemRef().getType().isa<MemRefType>())
13
Taking false branch
894 return op.emitOpError("expected tag to be of memref type");
895 numExpectedOperands += op.getTagMemRefRank();
896 if (numOperands < numExpectedOperands)
14
Assuming 'numOperands' is >= 'numExpectedOperands'
897 return op.emitOpError()
898 << "expected at least " << numExpectedOperands << " operands";
899 if (!op.getTagIndices().empty() &&
15
Assuming the condition is false
900 !llvm::all_of(op.getTagIndices().getTypes(),
901 [](Type t) { return t.isIndex(); }))
902 return op.emitOpError("expected tag indices to be of index type");
903
904 // Optional stride-related operands must be either both present or both
905 // absent.
906 if (numOperands != numExpectedOperands &&
16
Assuming 'numOperands' is equal to 'numExpectedOperands'
907 numOperands != numExpectedOperands + 2)
908 return op.emitOpError("incorrect number of operands");
909
910 // 5. Strides.
911 if (op.isStrided()) {
912 if (!op.getStride().getType().isIndex() ||
17
Calling 'DmaStartOp::getStride'
27
Returning from 'DmaStartOp::getStride'
28
Calling 'Value::getType'
913 !op.getNumElementsPerStride().getType().isIndex())
914 return op.emitOpError(
915 "expected stride and num elements per stride to be of type index");
916 }
917
918 return success();
919}
920
LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  /// dma_start(memrefcast) -> dma_start
  // In-place folding: operands feeding through a memref.cast are rewired to
  // the cast's source when the cast is foldable.
  return foldMemRefCast(*this);
}
926
927// ---------------------------------------------------------------------------
928// DmaWaitOp
929// ---------------------------------------------------------------------------
930
LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dma_wait(memrefcast) -> dma_wait
  // In-place folding: operands feeding through a memref.cast are rewired to
  // the cast's source when the cast is foldable.
  return foldMemRefCast(*this);
}
936
937static LogicalResult verify(DmaWaitOp op) {
938 // Check that the number of tag indices matches the tagMemRef rank.
939 unsigned numTagIndices = op.tagIndices().size();
940 unsigned tagMemRefRank = op.getTagMemRefRank();
941 if (numTagIndices != tagMemRefRank)
942 return op.emitOpError() << "expected tagIndices to have the same number of "
943 "elements as the tagMemRef rank, expected "
944 << tagMemRefRank << ", but got " << numTagIndices;
945 return success();
946}
947
948//===----------------------------------------------------------------------===//
949// GlobalOp
950//===----------------------------------------------------------------------===//
951
952static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
953 TypeAttr type,
954 Attribute initialValue) {
955 p << type;
956 if (!op.isExternal()) {
957 p << " = ";
958 if (op.isUninitialized())
959 p << "uninitialized";
960 else
961 p.printAttributeWithoutType(initialValue);
962 }
963}
964
965static ParseResult
966parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
967 Attribute &initialValue) {
968 Type type;
969 if (parser.parseType(type))
970 return failure();
971
972 auto memrefType = type.dyn_cast<MemRefType>();
973 if (!memrefType || !memrefType.hasStaticShape())
974 return parser.emitError(parser.getNameLoc())
975 << "type should be static shaped memref, but got " << type;
976 typeAttr = TypeAttr::get(type);
977
978 if (parser.parseOptionalEqual())
979 return success();
980
981 if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
982 initialValue = UnitAttr::get(parser.getContext());
983 return success();
984 }
985
986 Type tensorType = getTensorTypeFromMemRefType(memrefType);
987 if (parser.parseAttribute(initialValue, tensorType))
988 return failure();
989 if (!initialValue.isa<ElementsAttr>())
990 return parser.emitError(parser.getNameLoc())
991 << "initial value should be a unit or elements attribute";
992 return success();
993}
994
995static LogicalResult verify(GlobalOp op) {
996 auto memrefType = op.type().dyn_cast<MemRefType>();
997 if (!memrefType || !memrefType.hasStaticShape())
998 return op.emitOpError("type should be static shaped memref, but got ")
999 << op.type();
1000
1001 // Verify that the initial value, if present, is either a unit attribute or
1002 // an elements attribute.
1003 if (op.initial_value().hasValue()) {
1004 Attribute initValue = op.initial_value().getValue();
1005 if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
1006 return op.emitOpError("initial value should be a unit or elements "
1007 "attribute, but got ")
1008 << initValue;
1009
1010 // Check that the type of the initial value is compatible with the type of
1011 // the global variable.
1012 if (initValue.isa<ElementsAttr>()) {
1013 Type initType = initValue.getType();
1014 Type tensorType = getTensorTypeFromMemRefType(memrefType);
1015 if (initType != tensorType)
1016 return op.emitOpError("initial value expected to be of type ")
1017 << tensorType << ", but was of type " << initType;
1018 }
1019 }
1020
1021 if (Optional<uint64_t> alignAttr = op.alignment()) {
1022 uint64_t alignment = alignAttr.getValue();
1023
1024 if (!llvm::isPowerOf2_64(alignment))
1025 return op->emitError() << "alignment attribute value " << alignment
1026 << " is not a power of 2";
1027 }
1028
1029 // TODO: verify visibility for declarations.
1030 return success();
1031}
1032
1033//===----------------------------------------------------------------------===//
1034// GetGlobalOp
1035//===----------------------------------------------------------------------===//
1036
1037LogicalResult
1038GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
1039 // Verify that the result type is same as the type of the referenced
1040 // memref.global op.
1041 auto global =
1042 symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr());
1043 if (!global)
1044 return emitOpError("'")
1045 << name() << "' does not reference a valid global memref";
1046
1047 Type resultType = result().getType();
1048 if (global.type() != resultType)
1049 return emitOpError("result type ")
1050 << resultType << " does not match type " << global.type()
1051 << " of the global memref @" << name();
1052 return success();
1053}
1054
1055//===----------------------------------------------------------------------===//
1056// LoadOp
1057//===----------------------------------------------------------------------===//
1058
static LogicalResult verify(LoadOp op) {
  // Expect exactly one index operand per memref dimension, plus the memref
  // itself.
  if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
    return op.emitOpError("incorrect number of indices for load");
  return success();
}
1064
1065OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
1066 /// load(memrefcast) -> load
1067 if (succeeded(foldMemRefCast(*this)))
1068 return getResult();
1069 return OpFoldResult();
1070}
1071
1072//===----------------------------------------------------------------------===//
1073// PrefetchOp
1074//===----------------------------------------------------------------------===//
1075
1076static void print(OpAsmPrinter &p, PrefetchOp op) {
1077 p << " " << op.memref() << '[';
1078 p.printOperands(op.indices());
1079 p << ']' << ", " << (op.isWrite() ? "write" : "read");
1080 p << ", locality<" << op.localityHint();
1081 p << ">, " << (op.isDataCache() ? "data" : "instr");
1082 p.printOptionalAttrDict(
1083 op->getAttrs(),
1084 /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
1085 p << " : " << op.getMemRefType();
1086}
1087
// Parse a PrefetchOp:
//   %memref[%indices], read|write, locality<hint>, data|instr : memref-type
static ParseResult parsePrefetchOp(OpAsmParser &parser,
                                   OperationState &result) {
  OpAsmParser::OperandType memrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> indexInfo;
  IntegerAttr localityHint;
  MemRefType type;
  StringRef readOrWrite, cacheType;

  auto indexTy = parser.getBuilder().getIndexType();
  auto i32Type = parser.getBuilder().getIntegerType(32);
  if (parser.parseOperand(memrefInfo) ||
      parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
      parser.parseComma() || parser.parseKeyword("locality") ||
      parser.parseLess() ||
      parser.parseAttribute(localityHint, i32Type, "localityHint",
                            result.attributes) ||
      parser.parseGreater() || parser.parseComma() ||
      parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
      parser.resolveOperand(memrefInfo, type, result.operands) ||
      parser.resolveOperands(indexInfo, indexTy, result.operands))
    return failure();

  // The rw specifier keyword becomes the boolean `isWrite` attribute.
  if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
    return parser.emitError(parser.getNameLoc(),
                            "rw specifier has to be 'read' or 'write'");
  result.addAttribute(
      PrefetchOp::getIsWriteAttrName(),
      parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));

  // The cache-type keyword becomes the boolean `isDataCache` attribute.
  if (!cacheType.equals("data") && !cacheType.equals("instr"))
    return parser.emitError(parser.getNameLoc(),
                            "cache type has to be 'data' or 'instr'");

  result.addAttribute(
      PrefetchOp::getIsDataCacheAttrName(),
      parser.getBuilder().getBoolAttr(cacheType.equals("data")));

  return success();
}
1128
static LogicalResult verify(PrefetchOp op) {
  // Expect exactly one index operand per memref dimension, plus the memref
  // itself.
  if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
    return op.emitOpError("too few indices");

  return success();
}
1135
LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  // prefetch(memrefcast) -> prefetch
  // In-place folding of a foldable memref.cast feeding the memref operand.
  return foldMemRefCast(*this);
}
1141
1142//===----------------------------------------------------------------------===//
1143// RankOp
1144//===----------------------------------------------------------------------===//
1145
1146OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
1147 // Constant fold rank when the rank of the operand is known.
1148 auto type = getOperand().getType();
1149 auto shapedType = type.dyn_cast<ShapedType>();
1150 if (shapedType && shapedType.hasRank())
1151 return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
1152 return IntegerAttr();
1153}
1154
1155//===----------------------------------------------------------------------===//
1156// ReinterpretCastOp
1157//===----------------------------------------------------------------------===//
1158
1159/// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`,
1160/// `staticSizes` and `staticStrides` are automatically filled with
1161/// source-memref-rank sentinel values that encode dynamic entries.
void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
                              MemRefType resultType, Value source,
                              OpFoldResult offset, ArrayRef<OpFoldResult> sizes,
                              ArrayRef<OpFoldResult> strides,
                              ArrayRef<NamedAttribute> attrs) {
  // Split each OpFoldResult list into a dynamic SSA-value list and a static
  // i64 list, using the ShapedType sentinels to mark dynamic entries.
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  // Delegate to the generated builder that takes the split operand lists.
  build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}
1180
1181void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1182 MemRefType resultType, Value source,
1183 int64_t offset, ArrayRef<int64_t> sizes,
1184 ArrayRef<int64_t> strides,
1185 ArrayRef<NamedAttribute> attrs) {
1186 SmallVector<OpFoldResult> sizeValues =
1187 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1188 return b.getI64IntegerAttr(v);
1189 }));
1190 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1191 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1192 return b.getI64IntegerAttr(v);
1193 }));
1194 build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
1195 strideValues, attrs);
1196}
1197
1198void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1199 MemRefType resultType, Value source, Value offset,
1200 ValueRange sizes, ValueRange strides,
1201 ArrayRef<NamedAttribute> attrs) {
1202 SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1203 llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1204 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1205 llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1206 build(b, result, resultType, source, offset, sizeValues, strideValues, attrs);
1207}
1208
1209// TODO: ponder whether we want to allow missing trailing sizes/strides that are
1210// completed automatically, like we have for subview and extract_slice.
/// Verify a ReinterpretCastOp: the result memref type must be consistent with
/// the static_offsets/static_sizes/static_strides attributes and share the
/// source's element type and memory space.
static LogicalResult verify(ReinterpretCastOp op) {
  // The source and result memrefs should be in the same memory space.
  auto srcType = op.source().getType().cast<BaseMemRefType>();
  auto resultType = op.getType().cast<MemRefType>();
  if (srcType.getMemorySpace() != resultType.getMemorySpace())
    return op.emitError("different memory spaces specified for source type ")
           << srcType << " and result memref type " << resultType;
  if (srcType.getElementType() != resultType.getElementType())
    return op.emitError("different element types specified for source type ")
           << srcType << " and result memref type " << resultType;

  // Match sizes in result memref type and in static_sizes attribute.
  // A dynamic entry on either side matches anything.
  for (auto &en :
       llvm::enumerate(llvm::zip(resultType.getShape(),
                                 extractFromI64ArrayAttr(op.static_sizes())))) {
    int64_t resultSize = std::get<0>(en.value());
    int64_t expectedSize = std::get<1>(en.value());
    if (!ShapedType::isDynamic(resultSize) &&
        !ShapedType::isDynamic(expectedSize) && resultSize != expectedSize)
      return op.emitError("expected result type with size = ")
             << expectedSize << " instead of " << resultSize
             << " in dim = " << en.index();
  }

  // Match offset and strides in static_offset and static_strides attributes. If
  // result memref type has no affine map specified, this will assume an
  // identity layout.
  int64_t resultOffset;
  SmallVector<int64_t, 4> resultStrides;
  if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
    return op.emitError(
               "expected result type to have strided layout but found ")
           << resultType;

  // Match offset in result memref type and in static_offsets attribute.
  int64_t expectedOffset = extractFromI64ArrayAttr(op.static_offsets()).front();
  if (!ShapedType::isDynamicStrideOrOffset(resultOffset) &&
      !ShapedType::isDynamicStrideOrOffset(expectedOffset) &&
      resultOffset != expectedOffset)
    return op.emitError("expected result type with offset = ")
           << resultOffset << " instead of " << expectedOffset;

  // Match strides in result memref type and in static_strides attribute.
  for (auto &en : llvm::enumerate(llvm::zip(
           resultStrides, extractFromI64ArrayAttr(op.static_strides())))) {
    int64_t resultStride = std::get<0>(en.value());
    int64_t expectedStride = std::get<1>(en.value());
    if (!ShapedType::isDynamicStrideOrOffset(resultStride) &&
        !ShapedType::isDynamicStrideOrOffset(expectedStride) &&
        resultStride != expectedStride)
      return op.emitError("expected result type with stride = ")
             << expectedStride << " instead of " << resultStride
             << " in dim = " << en.index();
  }

  return success();
}
1268
1269//===----------------------------------------------------------------------===//
1270// Reassociative reshape ops
1271//===----------------------------------------------------------------------===//
1272
SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
  // Reassociation expressed as symbol-less affine maps.
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
  // Index-based reassociation converted to affine-expression form.
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}
1280
SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
  // Reassociation expressed as symbol-less affine maps.
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
  // Index-based reassociation converted to affine-expression form.
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}
1288
static void print(OpAsmPrinter &p, ExpandShapeOp op) {
  // Shared printer for reassociative reshape ops.
  ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
}
1292
static void print(OpAsmPrinter &p, CollapseShapeOp op) {
  // Shared printer for reassociative reshape ops.
  ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
}
1296
1297/// Detect whether memref dims [dim, dim + extent) can be reshaped without
1298/// copies.
1299static bool isReshapableDimBand(unsigned dim, unsigned extent,
1300 ArrayRef<int64_t> sizes,
1301 ArrayRef<AffineExpr> strides) {
1302 // Bands of extent one can be reshaped, as they are not reshaped at all.
1303 if (extent == 1)
1304 return true;
1305 // Otherwise, the size of the first dimension needs to be known.
1306 if (ShapedType::isDynamic(sizes[dim]))
1307 return false;
1308 assert(sizes.size() == strides.size() && "mismatched ranks")(static_cast <bool> (sizes.size() == strides.size() &&
"mismatched ranks") ? void (0) : __assert_fail ("sizes.size() == strides.size() && \"mismatched ranks\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 1308, __extension__
__PRETTY_FUNCTION__))
;
1309 // off by 1 indexing to avoid out of bounds
1310 // V
1311 for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) {
1312 // Only bands of static shapes are reshapable. This is due to the fact that
1313 // there is no relation between dynamic sizes and dynamic strides: we do not
1314 // have enough information to know whether a "-1" size corresponds to the
1315 // proper symbol in the AffineExpr of a stride.
1316 if (ShapedType::isDynamic(sizes[idx + 1]))
1317 return false;
1318 // TODO: Refine this by passing the proper nDims and nSymbols so we can
1319 // simplify on the fly and catch more reshapable cases.
1320 if (strides[idx] != strides[idx + 1] * sizes[idx + 1])
1321 return false;
1322 }
1323 return true;
1324}
1325
1326/// Compute the MemRefType obtained by applying the `reassociation` (which is
1327/// expected to be valid) to `type`.
1328/// If `type` is Contiguous MemRefType, this always produce a contiguous
1329/// MemRefType.
1330static MemRefType
1331computeReshapeCollapsedType(MemRefType type,
1332 ArrayRef<AffineMap> reassociation) {
1333 auto sizes = type.getShape();
1334 AffineExpr offset;
1335 SmallVector<AffineExpr, 4> strides;
1336 auto status = getStridesAndOffset(type, strides, offset);
1337 (void)status;
1338 assert(succeeded(status) && "expected strided memref")(static_cast <bool> (succeeded(status) && "expected strided memref"
) ? void (0) : __assert_fail ("succeeded(status) && \"expected strided memref\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 1338, __extension__
__PRETTY_FUNCTION__))
;
1339
1340 SmallVector<int64_t, 4> newSizes;
1341 newSizes.reserve(reassociation.size());
1342 SmallVector<AffineExpr, 4> newStrides;
1343 newStrides.reserve(reassociation.size());
1344
1345 // Use the fact that reassociation is valid to simplify the logic: only use
1346 // each map's rank.
1347 assert(isReassociationValid(reassociation) && "invalid reassociation")(static_cast <bool> (isReassociationValid(reassociation
) && "invalid reassociation") ? void (0) : __assert_fail
("isReassociationValid(reassociation) && \"invalid reassociation\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 1347, __extension__
__PRETTY_FUNCTION__))
;
1348 unsigned currentDim = 0;
1349 for (AffineMap m : reassociation) {
1350 unsigned dim = m.getNumResults();
1351 int64_t size = 1;
1352 AffineExpr stride = strides[currentDim + dim - 1];
1353 if (!isReshapableDimBand(currentDim, dim, sizes, strides)) {
1354 size = ShapedType::kDynamicSize;
1355 stride = AffineExpr();
1356 } else {
1357 for (unsigned d = 0; d < dim; ++d)
1358 size *= sizes[currentDim + d];
1359 }
1360 newSizes.push_back(size);
1361 newStrides.push_back(stride);
1362 currentDim += dim;
1363 }
1364
1365 // Early-exit: if `type` is contiguous, the result must be contiguous.
1366 if (canonicalizeStridedLayout(type).getLayout().isIdentity())
1367 return MemRefType::Builder(type).setShape(newSizes).setLayout({});
1368
1369 // Convert back to int64_t because we don't have enough information to create
1370 // new strided layouts from AffineExpr only. This corresponds to a case where
1371 // copies may be necessary.
1372 int64_t intOffset = ShapedType::kDynamicStrideOrOffset;
1373 if (auto o = offset.dyn_cast<AffineConstantExpr>())
1374 intOffset = o.getValue();
1375 SmallVector<int64_t, 4> intStrides;
1376 intStrides.reserve(strides.size());
1377 for (auto stride : newStrides) {
1378 if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>())
1379 intStrides.push_back(cst.getValue());
1380 else
1381 intStrides.push_back(ShapedType::kDynamicStrideOrOffset);
1382 }
1383 auto layout =
1384 makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext());
1385 return canonicalizeStridedLayout(
1386 MemRefType::Builder(type).setShape(newSizes).setLayout(
1387 AffineMapAttr::get(layout)));
1388}
1389
1390void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1391 ArrayRef<ReassociationIndices> reassociation,
1392 ArrayRef<NamedAttribute> attrs) {
1393 auto memRefType = src.getType().cast<MemRefType>();
1394 auto resultType = computeReshapeCollapsedType(
1395 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1396 b.getContext(), reassociation)));
1397 build(b, result, resultType, src, attrs);
1398 result.addAttribute(getReassociationAttrName(),
1399 getReassociationIndicesAttribute(b, reassociation));
1400}
1401
1402void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1403 ArrayRef<ReassociationIndices> reassociation,
1404 ArrayRef<NamedAttribute> attrs) {
1405 auto memRefType = src.getType().cast<MemRefType>();
1406 auto resultType = computeReshapeCollapsedType(
1407 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1408 b.getContext(), reassociation)));
1409 build(b, result, resultType, src, attrs);
1410 result.addAttribute(getReassociationAttrName(),
1411 getReassociationIndicesAttribute(b, reassociation));
1412}
1413
1414template <typename ReshapeOp,
1415 bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value>
1416static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType,
1417 MemRefType collapsedType) {
1418 if (failed(
1419 verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
1420 return failure();
1421 auto maps = op.getReassociationMaps();
1422 MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps);
1423 if (collapsedType != expectedType)
1424 return op.emitOpError("expected collapsed type to be ")
1425 << expectedType << ", but got " << collapsedType;
1426 return success();
1427}
1428
static LogicalResult verify(ExpandShapeOp op) {
  // For an expansion, the result is the expanded type, the source collapsed.
  return verifyReshapeOp(op, op.getResultType(), op.getSrcType());
}
1432
void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  // Collapse chains of expand ops and expand/collapse pairs.
  results.add<CollapseReshapeOps<ExpandShapeOp>,
              CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context);
}
1438
1439static LogicalResult verify(CollapseShapeOp op) {
1440 return verifyReshapeOp(op, op.getSrcType(), op.getResultType());
1441}
1442
/// Fold a memref.cast feeding a memref.collapse_shape: when the cast can be
/// absorbed by its consumer, collapse the cast's source instead and (if the
/// collapsed type changes) re-cast to the original result type.
struct CollapseShapeOpMemRefCastFolder
    : public OpRewritePattern<CollapseShapeOp> {
public:
  using OpRewritePattern<CollapseShapeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CollapseShapeOp op,
                                PatternRewriter &rewriter) const override {
    // Only applies when the source is produced by a memref.cast.
    auto cast = op.getOperand().getDefiningOp<CastOp>();
    if (!cast)
      return failure();

    if (!CastOp::canFoldIntoConsumerOp(cast))
      return failure();

    // Type the collapse would have if it consumed the cast's source directly.
    Type newResultType = computeReshapeCollapsedType(
        cast.getOperand().getType().cast<MemRefType>(),
        op.getReassociationMaps());

    if (newResultType == op.getResultType()) {
      // Same result type: just rewire the source operand in place.
      rewriter.updateRootInPlace(
          op, [&]() { op.srcMutable().assign(cast.source()); });
    } else {
      // Collapse the cast's source, then cast back to the original type.
      Value newOp = rewriter.create<CollapseShapeOp>(
          op->getLoc(), cast.source(), op.getReassociationIndices());
      rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
    }
    return success();
  }
};
1472
// Register reshape-chain folders plus the memref.cast folder above.
void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                  MLIRContext *context) {
  results.add<CollapseReshapeOps<CollapseShapeOp>,
              CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
              CollapseShapeOpMemRefCastFolder>(context);
}
// Fold expand_shape(collapse_shape) round-trips via the shared helper.
OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
}
// Fold collapse_shape(expand_shape) round-trips via the shared helper.
OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
}
1485
1486//===----------------------------------------------------------------------===//
1487// ReshapeOp
1488//===----------------------------------------------------------------------===//
1489
1490static LogicalResult verify(ReshapeOp op) {
1491 Type operandType = op.source().getType();
1492 Type resultType = op.result().getType();
1493
1494 Type operandElementType = operandType.cast<ShapedType>().getElementType();
1495 Type resultElementType = resultType.cast<ShapedType>().getElementType();
1496 if (operandElementType != resultElementType)
1497 return op.emitOpError("element types of source and destination memref "
1498 "types should be the same");
1499
1500 if (auto operandMemRefType = operandType.dyn_cast<MemRefType>())
1501 if (!operandMemRefType.getLayout().isIdentity())
1502 return op.emitOpError(
1503 "source memref type should have identity affine map");
1504
1505 int64_t shapeSize = op.shape().getType().cast<MemRefType>().getDimSize(0);
1506 auto resultMemRefType = resultType.dyn_cast<MemRefType>();
1507 if (resultMemRefType) {
1508 if (!resultMemRefType.getLayout().isIdentity())
1509 return op.emitOpError(
1510 "result memref type should have identity affine map");
1511 if (shapeSize == ShapedType::kDynamicSize)
1512 return op.emitOpError("cannot use shape operand with dynamic length to "
1513 "reshape to statically-ranked memref type");
1514 if (shapeSize != resultMemRefType.getRank())
1515 return op.emitOpError(
1516 "length of shape operand differs from the result's memref rank");
1517 }
1518 return success();
1519}
1520
1521//===----------------------------------------------------------------------===//
1522// StoreOp
1523//===----------------------------------------------------------------------===//
1524
1525static LogicalResult verify(StoreOp op) {
1526 if (op.getNumOperands() != 2 + op.getMemRefType().getRank())
1527 return op.emitOpError("store index operand count not equal to memref rank");
1528
1529 return success();
1530}
1531
// Fold the memref operand through a producing memref.cast when legal.
LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
                            SmallVectorImpl<OpFoldResult> &results) {
  /// store(memrefcast) -> store
  return foldMemRefCast(*this, getValueToStore());
}
1537
1538//===----------------------------------------------------------------------===//
1539// SubViewOp
1540//===----------------------------------------------------------------------===//
1541
namespace {
/// Helpers to write more idiomatic operations.
namespace saturated_arith {
/// Thin int64_t wrapper whose `+` and `*` saturate to the dynamic
/// stride/offset sentinel whenever either operand is already dynamic, so
/// arithmetic on mixed static/dynamic strides stays correct.
struct Wrapper {
  explicit Wrapper(int64_t v) : v(v) {}
  operator int64_t() { return v; }
  int64_t v;
};
Wrapper operator+(Wrapper a, int64_t b) {
  // Dynamic + anything is dynamic.
  if (ShapedType::isDynamicStrideOrOffset(a) ||
      ShapedType::isDynamicStrideOrOffset(b))
    return Wrapper(ShapedType::kDynamicStrideOrOffset);
  return Wrapper(a.v + b);
}
Wrapper operator*(Wrapper a, int64_t b) {
  // Dynamic * anything is dynamic.
  if (ShapedType::isDynamicStrideOrOffset(a) ||
      ShapedType::isDynamicStrideOrOffset(b))
    return Wrapper(ShapedType::kDynamicStrideOrOffset);
  return Wrapper(a.v * b);
}
} // namespace saturated_arith
} // namespace
1564
1565/// A subview result type can be fully inferred from the source type and the
1566/// static representation of offsets, sizes and strides. Special sentinels
1567/// encode the dynamic case.
/// A subview result type can be fully inferred from the source type and the
/// static representation of offsets, sizes and strides. Special sentinels
/// encode the dynamic case.
Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
                                ArrayRef<int64_t> staticOffsets,
                                ArrayRef<int64_t> staticSizes,
                                ArrayRef<int64_t> staticStrides) {
  unsigned rank = sourceMemRefType.getRank();
  (void)rank;
  assert(staticOffsets.size() == rank && "staticOffsets length mismatch");
  assert(staticSizes.size() == rank && "staticSizes length mismatch");
  assert(staticStrides.size() == rank && "staticStrides length mismatch");

  // Extract source offset and strides.
  int64_t sourceOffset;
  SmallVector<int64_t, 4> sourceStrides;
  auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
  assert(succeeded(res) && "SubViewOp expected strided memref type");
  (void)res;

  // Compute target offset whose value is:
  //   `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
  // Saturated arithmetic propagates the dynamic sentinel through each term.
  int64_t targetOffset = sourceOffset;
  for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
    auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it);
    using namespace saturated_arith;
    targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * targetStride;
  }

  // Compute target stride whose value is:
  //   `sourceStrides_i * staticStrides_i` (again saturating on dynamic).
  SmallVector<int64_t, 4> targetStrides;
  targetStrides.reserve(staticOffsets.size());
  for (auto it : llvm::zip(sourceStrides, staticStrides)) {
    auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
    using namespace saturated_arith;
    targetStrides.push_back(Wrapper(sourceStride) * staticStride);
  }

  // The type is now known.
  return MemRefType::get(
      staticSizes, sourceMemRefType.getElementType(),
      makeStridedLinearLayoutMap(targetStrides, targetOffset,
                                 sourceMemRefType.getContext()),
      sourceMemRefType.getMemorySpace());
}
1611
1612Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1613 ArrayRef<OpFoldResult> offsets,
1614 ArrayRef<OpFoldResult> sizes,
1615 ArrayRef<OpFoldResult> strides) {
1616 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1617 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1618 dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1619 ShapedType::kDynamicStrideOrOffset);
1620 dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1621 ShapedType::kDynamicSize);
1622 dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1623 ShapedType::kDynamicStrideOrOffset);
1624 return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1625 staticSizes, staticStrides);
1626}
1627
/// Infer the subview result type, then drop `rankDiff` unit dimensions to
/// reach `resultRank` (projecting the layout map accordingly).
Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
                                           MemRefType sourceRankedTensorType,
                                           ArrayRef<int64_t> offsets,
                                           ArrayRef<int64_t> sizes,
                                           ArrayRef<int64_t> strides) {
  auto inferredType =
      inferResultType(sourceRankedTensorType, offsets, sizes, strides)
          .cast<MemRefType>();
  assert(inferredType.getRank() >= resultRank && "expected ");
  int rankDiff = inferredType.getRank() - resultRank;
  if (rankDiff > 0) {
    auto shape = inferredType.getShape();
    // Pick `rankDiff` size-one dimensions to project away.
    llvm::SmallDenseSet<unsigned> dimsToProject;
    mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
    SmallVector<int64_t> projectedShape;
    for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
      if (!dimsToProject.contains(pos))
        projectedShape.push_back(shape[pos]);

    // Project the layout map only when it is non-trivial.
    AffineMap map = inferredType.getLayout().getAffineMap();
    if (!map.isIdentity())
      map = getProjectedMap(map, dimsToProject);
    inferredType =
        MemRefType::get(projectedShape, inferredType.getElementType(), map,
                        inferredType.getMemorySpace());
  }
  return inferredType;
}
1656
1657Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
1658 MemRefType sourceRankedTensorType,
1659 ArrayRef<OpFoldResult> offsets,
1660 ArrayRef<OpFoldResult> sizes,
1661 ArrayRef<OpFoldResult> strides) {
1662 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1663 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1664 dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1665 ShapedType::kDynamicStrideOrOffset);
1666 dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1667 ShapedType::kDynamicSize);
1668 dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1669 ShapedType::kDynamicStrideOrOffset);
1670 return SubViewOp::inferRankReducedResultType(
1671 resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
1672 staticStrides);
1673}
// Build a SubViewOp with mixed static and dynamic entries and custom result
// type. If the type passed is nullptr, it is inferred.
void SubViewOp::build(OpBuilder &b, OperationState &result,
                      MemRefType resultType, Value source,
                      ArrayRef<OpFoldResult> offsets,
                      ArrayRef<OpFoldResult> sizes,
                      ArrayRef<OpFoldResult> strides,
                      ArrayRef<NamedAttribute> attrs) {
  // Split mixed entries into static constants (sentinel for dynamic) and
  // dynamic SSA values.
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  auto sourceMemRefType = source.getType().cast<MemRefType>();
  // Structuring implementation this way avoids duplication between builders.
  if (!resultType) {
    resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
                                            staticSizes, staticStrides)
                     .cast<MemRefType>();
  }
  build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}
1702
// Build a SubViewOp with mixed static and dynamic entries and inferred result
// type. Delegates with a null MemRefType, which triggers type inference.
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
                      ArrayRef<OpFoldResult> offsets,
                      ArrayRef<OpFoldResult> sizes,
                      ArrayRef<OpFoldResult> strides,
                      ArrayRef<NamedAttribute> attrs) {
  build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
}
1712
1713// Build a SubViewOp with static entries and inferred result type.
1714void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1715 ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1716 ArrayRef<int64_t> strides,
1717 ArrayRef<NamedAttribute> attrs) {
1718 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1719 llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1720 return b.getI64IntegerAttr(v);
1721 }));
1722 SmallVector<OpFoldResult> sizeValues =
1723 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1724 return b.getI64IntegerAttr(v);
1725 }));
1726 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1727 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1728 return b.getI64IntegerAttr(v);
1729 }));
1730 build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
1731}
1732
1733// Build a SubViewOp with dynamic entries and custom result type. If the
1734// type passed is nullptr, it is inferred.
1735void SubViewOp::build(OpBuilder &b, OperationState &result,
1736 MemRefType resultType, Value source,
1737 ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1738 ArrayRef<int64_t> strides,
1739 ArrayRef<NamedAttribute> attrs) {
1740 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1741 llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1742 return b.getI64IntegerAttr(v);
1743 }));
1744 SmallVector<OpFoldResult> sizeValues =
1745 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1746 return b.getI64IntegerAttr(v);
1747 }));
1748 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1749 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1750 return b.getI64IntegerAttr(v);
1751 }));
1752 build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
1753 attrs);
1754}
1755
1756// Build a SubViewOp with dynamic entries and custom result type. If the type
1757// passed is nullptr, it is inferred.
1758void SubViewOp::build(OpBuilder &b, OperationState &result,
1759 MemRefType resultType, Value source, ValueRange offsets,
1760 ValueRange sizes, ValueRange strides,
1761 ArrayRef<NamedAttribute> attrs) {
1762 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1763 llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1764 SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1765 llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1766 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1767 llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1768 build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1769}
1770
// Build a SubViewOp with dynamic entries and inferred result type.
// A null MemRefType triggers type inference in the delegated builder.
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
                      ValueRange offsets, ValueRange sizes, ValueRange strides,
                      ArrayRef<NamedAttribute> attrs) {
  build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
}
1777
/// For ViewLikeOpInterface: the viewed memref is the subview's source.
Value SubViewOp::getViewSource() { return source(); }
1780
1781/// Return true if t1 and t2 have equal offsets (both dynamic or of same static
1782/// value).
1783static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) {
1784 AffineExpr t1Offset, t2Offset;
1785 SmallVector<AffineExpr> t1Strides, t2Strides;
1786 auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset);
1787 auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset);
1788 return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset;
1789}
1790
/// Checks if `original` Type type can be rank reduced to `reduced` type.
/// This function is slight variant of `is subsequence` algorithm where
/// not matching dimension must be 1.
static SliceVerificationResult
isRankReducedMemRefType(MemRefType originalType,
                        MemRefType candidateRankReducedType,
                        ArrayRef<OpFoldResult> sizes) {
  // Generic shape/element-type rank-reduction check first; its failure code
  // is more specific than the layout checks below.
  auto partialRes = isRankReducedType(originalType, candidateRankReducedType);
  if (partialRes != SliceVerificationResult::Success)
    return partialRes;

  auto optionalUnusedDimsMask = computeMemRefRankReductionMask(
      originalType, candidateRankReducedType, sizes);

  // Sizes cannot be matched in case empty vector is returned.
  if (!optionalUnusedDimsMask.hasValue())
    return SliceVerificationResult::LayoutMismatch;

  if (originalType.getMemorySpace() !=
      candidateRankReducedType.getMemorySpace())
    return SliceVerificationResult::MemSpaceMismatch;

  // No amount of stride dropping can reconcile incompatible offsets.
  if (!haveCompatibleOffsets(originalType, candidateRankReducedType))
    return SliceVerificationResult::LayoutMismatch;

  return SliceVerificationResult::Success;
}
1819
/// Translate a SliceVerificationResult into success or a diagnostic on `op`.
/// `expectedType` is the inferred (non-rank-reduced) result type used in the
/// error messages.
template <typename OpTy>
static LogicalResult produceSubViewErrorMsg(SliceVerificationResult result,
                                            OpTy op, Type expectedType) {
  auto memrefType = expectedType.cast<ShapedType>();
  switch (result) {
  case SliceVerificationResult::Success:
    return success();
  case SliceVerificationResult::RankTooLarge:
    return op.emitError("expected result rank to be smaller or equal to ")
           << "the source rank. ";
  case SliceVerificationResult::SizeMismatch:
    return op.emitError("expected result type to be ")
           << expectedType
           << " or a rank-reduced version. (mismatch of result sizes) ";
  case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected result element type to be ")
           << memrefType.getElementType();
  case SliceVerificationResult::MemSpaceMismatch:
    return op.emitError("expected result and source memory spaces to match.");
  case SliceVerificationResult::LayoutMismatch:
    return op.emitError("expected result type to be ")
           << expectedType
           << " or a rank-reduced version. (mismatch of result layout) ";
  }
  llvm_unreachable("unexpected subview verification result");
}
1846
/// Verifier for SubViewOp.
static LogicalResult verify(SubViewOp op) {
  MemRefType baseType = op.getSourceType();
  MemRefType subViewType = op.getType();

  // The base memref and the view memref should be in the same memory space.
  if (baseType.getMemorySpace() != subViewType.getMemorySpace())
    return op.emitError("different memory spaces specified for base memref "
                        "type ")
           << baseType << " and subview memref type " << subViewType;

  // Verify that the base memref type has a strided layout map.
  if (!isStrided(baseType))
    return op.emitError("base type ") << baseType << " is not strided";

  // Verify result type against inferred type (allowing rank reduction).
  auto expectedType = SubViewOp::inferResultType(
      baseType, extractFromI64ArrayAttr(op.static_offsets()),
      extractFromI64ArrayAttr(op.static_sizes()),
      extractFromI64ArrayAttr(op.static_strides()));

  auto result = isRankReducedMemRefType(expectedType.cast<MemRefType>(),
                                        subViewType, op.getMixedSizes());
  return produceSubViewErrorMsg(result, op, expectedType);
}
1872
// Debug printing for Range: "range <offset>:<size>:<stride>".
raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) {
  return os << "range " << range.offset << ":" << range.size << ":"
            << range.stride;
}
1877
/// Return the list of Range (i.e. offset, size, stride). Each Range
/// entry contains either the dynamic value or a ConstantIndexOp constructed
/// with `b` at location `loc`.
SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
                                              OpBuilder &b, Location loc) {
  std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
  assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
  assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
  SmallVector<Range, 8> res;
  unsigned rank = ranks[0];
  res.reserve(rank);
  for (unsigned idx = 0; idx < rank; ++idx) {
    // For each of offset/size/stride: reuse the dynamic SSA value when
    // present, otherwise materialize the static value as a constant.
    Value offset =
        op.isDynamicOffset(idx)
            ? op.getDynamicOffset(idx)
            : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
    Value size =
        op.isDynamicSize(idx)
            ? op.getDynamicSize(idx)
            : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
    Value stride =
        op.isDynamicStride(idx)
            ? op.getDynamicStride(idx)
            : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
    res.emplace_back(Range{offset, size, stride});
  }
  return res;
}
1906
/// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
/// deduce the result type for the given `sourceType`. Additionally, reduce the
/// rank of the inferred result type if `currentResultType` is lower rank than
/// `currentSourceType`. Use this signature if `sourceType` is updated together
/// with the result type. In this case, it is important to compute the dropped
/// dimensions using `currentSourceType` whose strides align with
/// `currentResultType`.
static MemRefType getCanonicalSubViewResultType(
    MemRefType currentResultType, MemRefType currentSourceType,
    MemRefType sourceType, ArrayRef<OpFoldResult> mixedOffsets,
    ArrayRef<OpFoldResult> mixedSizes, ArrayRef<OpFoldResult> mixedStrides) {
  auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
                                                       mixedSizes, mixedStrides)
                                .cast<MemRefType>();
  // The dropped-dimensions mask is computed against the *current* types so
  // that it matches the rank reduction the op already performs.
  llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
      computeMemRefRankReductionMask(currentSourceType, currentResultType,
                                     mixedSizes);
  // Return nullptr as failure mode.
  if (!unusedDims)
    return nullptr;
  // Drop the unused dimensions from the inferred shape.
  SmallVector<int64_t> shape;
  for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
    if (unusedDims->count(sizes.index()))
      continue;
    shape.push_back(sizes.value());
  }
  // Project the layout map only when it is non-trivial.
  AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
  if (!layoutMap.isIdentity())
    layoutMap = getProjectedMap(layoutMap, unusedDims.getValue());
  return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
                         nonRankReducedType.getMemorySpace());
}
1939
/// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
/// deduce the result type. Additionally, reduce the rank of the inferred result
/// type if `currentResultType` is lower rank than `sourceType`.
/// Convenience overload: the current and new source types coincide.
static MemRefType getCanonicalSubViewResultType(
    MemRefType currentResultType, MemRefType sourceType,
    ArrayRef<OpFoldResult> mixedOffsets, ArrayRef<OpFoldResult> mixedSizes,
    ArrayRef<OpFoldResult> mixedStrides) {
  return getCanonicalSubViewResultType(currentResultType, sourceType,
                                       sourceType, mixedOffsets, mixedSizes,
                                       mixedStrides);
}
1951
1952/// Helper method to check if a `subview` operation is trivially a no-op. This
1953/// is the case if the all offsets are zero, all strides are 1, and the source
1954/// shape is same as the size of the subview. In such cases, the subview can be
1955/// folded into its source.
1956static bool isTrivialSubViewOp(SubViewOp subViewOp) {
1957 if (subViewOp.getSourceType().getRank() != subViewOp.getType().getRank())
1958 return false;
1959
1960 auto mixedOffsets = subViewOp.getMixedOffsets();
1961 auto mixedSizes = subViewOp.getMixedSizes();
1962 auto mixedStrides = subViewOp.getMixedStrides();
1963
1964 // Check offsets are zero.
1965 if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
1966 Optional<int64_t> intValue = getConstantIntValue(ofr);
1967 return !intValue || intValue.getValue() != 0;
1968 }))
1969 return false;
1970
1971 // Check strides are one.
1972 if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
1973 Optional<int64_t> intValue = getConstantIntValue(ofr);
1974 return !intValue || intValue.getValue() != 1;
1975 }))
1976 return false;
1977
1978 // Check all size values are static and matches the (static) source shape.
1979 ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
1980 for (const auto &size : llvm::enumerate(mixedSizes)) {
1981 Optional<int64_t> intValue = getConstantIntValue(size.value());
1982 if (!intValue || intValue.getValue() != sourceShape[size.index()])
1983 return false;
1984 }
1985 // All conditions met. The `SubViewOp` is foldable as a no-op.
1986 return true;
1987}
1988
1989namespace {
1990/// Pattern to rewrite a subview op with MemRefCast arguments.
1991/// This essentially pushes memref.cast past its consuming subview when
1992/// `canFoldIntoConsumerOp` is true.
1993///
1994/// Example:
1995/// ```
1996/// %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
1997/// %1 = memref.subview %0[0, 0][3, 4][1, 1] :
1998/// memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
1999/// ```
2000/// is rewritten into:
2001/// ```
2002/// %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
2003/// %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
2004/// memref<3x4xf32, offset:?, strides:[?, 1]>
2005/// ```
class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    // Any constant operand, just return to let SubViewOpConstantFolder kick in.
    if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    // Only applies when the source comes from a memref.cast that may be
    // folded into its consumer.
    auto castOp = subViewOp.source().getDefiningOp<CastOp>();
    if (!castOp)
      return failure();

    if (!CastOp::canFoldIntoConsumerOp(castOp))
      return failure();

    // Compute the SubViewOp result type after folding the MemRefCastOp. Use the
    // MemRefCastOp source operand type to infer the result type and the current
    // SubViewOp source operand type to compute the dropped dimensions if the
    // operation is rank-reducing.
    auto resultType = getCanonicalSubViewResultType(
        subViewOp.getType(), subViewOp.getSourceType(),
        castOp.source().getType().cast<MemRefType>(),
        subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
        subViewOp.getMixedStrides());
    // getCanonicalSubViewResultType returns null when the dropped dimensions
    // cannot be computed.
    if (!resultType)
      return failure();

    // Subview the cast's source directly, then cast back to the original
    // result type.
    Value newSubView = rewriter.create<SubViewOp>(
        subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
        subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
        subViewOp.static_sizes(), subViewOp.static_strides());
    rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
                                        newSubView);
    return success();
  }
};
2046
/// Canonicalize subview ops that are no-ops. When the source shape is not same
/// as a result shape due to use of `affine_map`.
class TrivialSubViewOpFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    if (!isTrivialSubViewOp(subViewOp))
      return failure();
    // Identical types: replace with the source directly.
    if (subViewOp.getSourceType() == subViewOp.getType()) {
      rewriter.replaceOp(subViewOp, subViewOp.source());
      return success();
    }
    // Same shape but different layout: a cast suffices.
    rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.source(),
                                        subViewOp.getType());
    return success();
  }
};
2066} // namespace
2067
/// Return the canonical type of the result of a subview.
struct SubViewReturnTypeCanonicalizer {
  MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
                        ArrayRef<OpFoldResult> mixedSizes,
                        ArrayRef<OpFoldResult> mixedStrides) {
    // Re-infer the (possibly rank-reduced) result type from the op's mixed
    // offset/size/stride lists.
    return getCanonicalSubViewResultType(op.getType(), op.getSourceType(),
                                         mixedOffsets, mixedSizes,
                                         mixedStrides);
  }
};
2078
/// A canonicalizer wrapper to replace SubViewOps.
struct SubViewCanonicalizer {
  void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
    // Cast back so users keep seeing the original result type.
    rewriter.replaceOpWithNewOp<CastOp>(op, newOp, op.getType());
  }
};
2085
// Register constant-argument folding, cast folding, and trivial-subview
// folding patterns.
void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                            MLIRContext *context) {
  results
      .add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
               SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
           SubViewOpMemRefCastFolder, TrivialSubViewOpFolder>(context);
}
2093
2094OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
2095 auto resultShapedType = getResult().getType().cast<ShapedType>();
2096 auto sourceShapedType = source().getType().cast<ShapedType>();
2097
2098 if (resultShapedType.hasStaticShape() &&
2099 resultShapedType == sourceShapedType) {
2100 return getViewSource();
2101 }
2102
2103 return {};
2104}
2105
2106//===----------------------------------------------------------------------===//
2107// TransposeOp
2108//===----------------------------------------------------------------------===//
2109
2110/// Build a strided memref type by applying `permutationMap` tp `memRefType`.
2111static MemRefType inferTransposeResultType(MemRefType memRefType,
2112 AffineMap permutationMap) {
2113 auto rank = memRefType.getRank();
2114 auto originalSizes = memRefType.getShape();
2115 // Compute permuted sizes.
2116 SmallVector<int64_t, 4> sizes(rank, 0);
2117 for (const auto &en : llvm::enumerate(permutationMap.getResults()))
2118 sizes[en.index()] =
2119 originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
2120
2121 // Compute permuted strides.
2122 int64_t offset;
2123 SmallVector<int64_t, 4> strides;
2124 auto res = getStridesAndOffset(memRefType, strides, offset);
2125 assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank))(static_cast <bool> (succeeded(res) && strides.
size() == static_cast<unsigned>(rank)) ? void (0) : __assert_fail
("succeeded(res) && strides.size() == static_cast<unsigned>(rank)"
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 2125, __extension__
__PRETTY_FUNCTION__))
;
2126 (void)res;
2127 auto map =
2128 makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
2129 map = permutationMap ? map.compose(permutationMap) : map;
2130 return MemRefType::Builder(memRefType)
2131 .setShape(sizes)
2132 .setLayout(AffineMapAttr::get(map));
2133}
2134
/// Build a TransposeOp: infer the permuted result type from the input memref
/// and the permutation map, and record the permutation attribute.
void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
                        AffineMapAttr permutation,
                        ArrayRef<NamedAttribute> attrs) {
  auto permutationMap = permutation.getValue();
  assert(permutationMap);

  auto memRefType = in.getType().cast<MemRefType>();
  // Compute result type.
  MemRefType resultType = inferTransposeResultType(memRefType, permutationMap);

  build(b, result, resultType, in, attrs);
  result.addAttribute(TransposeOp::getPermutationAttrName(), permutation);
}
2148
2149// transpose $in $permutation attr-dict : type($in) `to` type(results)
2150static void print(OpAsmPrinter &p, TransposeOp op) {
2151 p << " " << op.in() << " " << op.permutation();
2152 p.printOptionalAttrDict(op->getAttrs(),
2153 {TransposeOp::getPermutationAttrName()});
2154 p << " : " << op.in().getType() << " to " << op.getType();
2155}
2156
2157static ParseResult parseTransposeOp(OpAsmParser &parser,
2158 OperationState &result) {
2159 OpAsmParser::OperandType in;
2160 AffineMap permutation;
2161 MemRefType srcType, dstType;
2162 if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
2163 parser.parseOptionalAttrDict(result.attributes) ||
2164 parser.parseColonType(srcType) ||
2165 parser.resolveOperand(in, srcType, result.operands) ||
2166 parser.parseKeywordType("to", dstType) ||
2167 parser.addTypeToList(dstType, result.types))
2168 return failure();
2169
2170 result.addAttribute(TransposeOp::getPermutationAttrName(),
2171 AffineMapAttr::get(permutation));
2172 return success();
2173}
2174
2175static LogicalResult verify(TransposeOp op) {
2176 if (!op.permutation().isPermutation())
2177 return op.emitOpError("expected a permutation map");
2178 if (op.permutation().getNumDims() != op.getShapedType().getRank())
2179 return op.emitOpError(
2180 "expected a permutation map of same rank as the input");
2181
2182 auto srcType = op.in().getType().cast<MemRefType>();
2183 auto dstType = op.getType().cast<MemRefType>();
2184 auto transposedType = inferTransposeResultType(srcType, op.permutation());
2185 if (dstType != transposedType)
2186 return op.emitOpError("output type ")
2187 << dstType << " does not match transposed input type " << srcType
2188 << ", " << transposedType;
2189 return success();
2190}
2191
2192OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
2193 if (succeeded(foldMemRefCast(*this)))
2194 return getResult();
2195 return {};
2196}
2197
2198//===----------------------------------------------------------------------===//
2199// ViewOp
2200//===----------------------------------------------------------------------===//
2201
2202static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
2203 OpAsmParser::OperandType srcInfo;
2204 SmallVector<OpAsmParser::OperandType, 1> offsetInfo;
2205 SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
2206 auto indexType = parser.getBuilder().getIndexType();
2207 Type srcType, dstType;
2208 llvm::SMLoc offsetLoc;
2209 if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) ||
2210 parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square))
2211 return failure();
2212
2213 if (offsetInfo.size() != 1)
2214 return parser.emitError(offsetLoc) << "expects 1 offset operand";
2215
2216 return failure(
2217 parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
2218 parser.parseOptionalAttrDict(result.attributes) ||
2219 parser.parseColonType(srcType) ||
2220 parser.resolveOperand(srcInfo, srcType, result.operands) ||
2221 parser.resolveOperands(offsetInfo, indexType, result.operands) ||
2222 parser.resolveOperands(sizesInfo, indexType, result.operands) ||
2223 parser.parseKeywordType("to", dstType) ||
2224 parser.addTypeToList(dstType, result.types));
2225}
2226
2227static void print(OpAsmPrinter &p, ViewOp op) {
2228 p << ' ' << op.getOperand(0) << '[';
2229 p.printOperand(op.byte_shift());
2230 p << "][" << op.sizes() << ']';
2231 p.printOptionalAttrDict(op->getAttrs());
2232 p << " : " << op.getOperand(0).getType() << " to " << op.getType();
2233}
2234
2235static LogicalResult verify(ViewOp op) {
2236 auto baseType = op.getOperand(0).getType().cast<MemRefType>();
2237 auto viewType = op.getType();
2238
2239 // The base memref should have identity layout map (or none).
2240 if (!baseType.getLayout().isIdentity())
2241 return op.emitError("unsupported map for base memref type ") << baseType;
2242
2243 // The result memref should have identity layout map (or none).
2244 if (!viewType.getLayout().isIdentity())
2245 return op.emitError("unsupported map for result memref type ") << viewType;
2246
2247 // The base memref and the view memref should be in the same memory space.
2248 if (baseType.getMemorySpace() != viewType.getMemorySpace())
2249 return op.emitError("different memory spaces specified for base memref "
2250 "type ")
2251 << baseType << " and view memref type " << viewType;
2252
2253 // Verify that we have the correct number of sizes for the result type.
2254 unsigned numDynamicDims = viewType.getNumDynamicDims();
2255 if (op.sizes().size() != numDynamicDims)
2256 return op.emitError("incorrect number of size operands for type ")
2257 << viewType;
2258
2259 return success();
2260}
2261
2262Value ViewOp::getViewSource() { return source(); }
2263
2264namespace {
2265
2266struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
2267 using OpRewritePattern<ViewOp>::OpRewritePattern;
2268
2269 LogicalResult matchAndRewrite(ViewOp viewOp,
2270 PatternRewriter &rewriter) const override {
2271 // Return if none of the operands are constants.
2272 if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
2273 return matchPattern(operand, matchConstantIndex());
2274 }))
2275 return failure();
2276
2277 // Get result memref type.
2278 auto memrefType = viewOp.getType();
2279
2280 // Get offset from old memref view type 'memRefType'.
2281 int64_t oldOffset;
2282 SmallVector<int64_t, 4> oldStrides;
2283 if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
2284 return failure();
2285 assert(oldOffset == 0 && "Expected 0 offset")(static_cast <bool> (oldOffset == 0 && "Expected 0 offset"
) ? void (0) : __assert_fail ("oldOffset == 0 && \"Expected 0 offset\""
, "mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp", 2285, __extension__
__PRETTY_FUNCTION__))
;
2286
2287 SmallVector<Value, 4> newOperands;
2288
2289 // Offset cannot be folded into result type.
2290
2291 // Fold any dynamic dim operands which are produced by a constant.
2292 SmallVector<int64_t, 4> newShapeConstants;
2293 newShapeConstants.reserve(memrefType.getRank());
2294
2295 unsigned dynamicDimPos = 0;
2296 unsigned rank = memrefType.getRank();
2297 for (unsigned dim = 0, e = rank; dim < e; ++dim) {
2298 int64_t dimSize = memrefType.getDimSize(dim);
2299 // If this is already static dimension, keep it.
2300 if (!ShapedType::isDynamic(dimSize)) {
2301 newShapeConstants.push_back(dimSize);
2302 continue;
2303 }
2304 auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp();
2305 if (auto constantIndexOp =
2306 dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
2307 // Dynamic shape dimension will be folded.
2308 newShapeConstants.push_back(constantIndexOp.value());
2309 } else {
2310 // Dynamic shape dimension not folded; copy operand from old memref.
2311 newShapeConstants.push_back(dimSize);
2312 newOperands.push_back(viewOp.sizes()[dynamicDimPos]);
2313 }
2314 dynamicDimPos++;
2315 }
2316
2317 // Create new memref type with constant folded dims.
2318 MemRefType newMemRefType =
2319 MemRefType::Builder(memrefType).setShape(newShapeConstants);
2320 // Nothing new, don't fold.
2321 if (newMemRefType == memrefType)
2322 return failure();
2323
2324 // Create new ViewOp.
2325 auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
2326 viewOp.getOperand(0),
2327 viewOp.byte_shift(), newOperands);
2328 // Insert a cast so we have the same type as the old memref type.
2329 rewriter.replaceOpWithNewOp<CastOp>(viewOp, newViewOp, viewOp.getType());
2330 return success();
2331 }
2332};
2333
2334struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
2335 using OpRewritePattern<ViewOp>::OpRewritePattern;
2336
2337 LogicalResult matchAndRewrite(ViewOp viewOp,
2338 PatternRewriter &rewriter) const override {
2339 Value memrefOperand = viewOp.getOperand(0);
2340 CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>();
2341 if (!memrefCastOp)
2342 return failure();
2343 Value allocOperand = memrefCastOp.getOperand();
2344 AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>();
2345 if (!allocOp)
2346 return failure();
2347 rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
2348 viewOp.byte_shift(), viewOp.sizes());
2349 return success();
2350 }
2351};
2352
2353} // namespace
2354
2355void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2356 MLIRContext *context) {
2357 results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
2358}
2359
2360//===----------------------------------------------------------------------===//
2361// AtomicRMWOp
2362//===----------------------------------------------------------------------===//
2363
2364static LogicalResult verify(AtomicRMWOp op) {
2365 if (op.getMemRefType().getRank() != op.getNumOperands() - 2)
2366 return op.emitOpError(
2367 "expects the number of subscripts to be equal to memref rank");
2368 switch (op.kind()) {
2369 case arith::AtomicRMWKind::addf:
2370 case arith::AtomicRMWKind::maxf:
2371 case arith::AtomicRMWKind::minf:
2372 case arith::AtomicRMWKind::mulf:
2373 if (!op.value().getType().isa<FloatType>())
2374 return op.emitOpError()
2375 << "with kind '" << arith::stringifyAtomicRMWKind(op.kind())
2376 << "' expects a floating-point type";
2377 break;
2378 case arith::AtomicRMWKind::addi:
2379 case arith::AtomicRMWKind::maxs:
2380 case arith::AtomicRMWKind::maxu:
2381 case arith::AtomicRMWKind::mins:
2382 case arith::AtomicRMWKind::minu:
2383 case arith::AtomicRMWKind::muli:
2384 case arith::AtomicRMWKind::ori:
2385 case arith::AtomicRMWKind::andi:
2386 if (!op.value().getType().isa<IntegerType>())
2387 return op.emitOpError()
2388 << "with kind '" << arith::stringifyAtomicRMWKind(op.kind())
2389 << "' expects an integer type";
2390 break;
2391 default:
2392 break;
2393 }
2394 return success();
2395}
2396
2397OpFoldResult AtomicRMWOp::fold(ArrayRef<Attribute> operands) {
2398 /// atomicrmw(memrefcast) -> atomicrmw
2399 if (succeeded(foldMemRefCast(*this, value())))
2400 return getResult();
2401 return OpFoldResult();
2402}
2403
2404//===----------------------------------------------------------------------===//
2405// TableGen'd op method definitions
2406//===----------------------------------------------------------------------===//
2407
2408#define GET_OP_CLASSES
2409#include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"

tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc

1/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
2|* *|
3|* Op Declarations *|
4|* *|
5|* Automatically generated file, do not edit! *|
6|* *|
7\*===----------------------------------------------------------------------===*/
8
9#if defined(GET_OP_CLASSES) || defined(GET_OP_FWD_DEFINES)
10#undef GET_OP_FWD_DEFINES
11namespace mlir {
12namespace memref {
13class AssumeAlignmentOp;
14} // namespace memref
15} // namespace mlir
16namespace mlir {
17namespace memref {
18class AtomicRMWOp;
19} // namespace memref
20} // namespace mlir
21namespace mlir {
22namespace memref {
23class CopyOp;
24} // namespace memref
25} // namespace mlir
26namespace mlir {
27namespace memref {
28class LoadOp;
29} // namespace memref
30} // namespace mlir
31namespace mlir {
32namespace memref {
33class AllocOp;
34} // namespace memref
35} // namespace mlir
36namespace mlir {
37namespace memref {
38class AllocaOp;
39} // namespace memref
40} // namespace mlir
41namespace mlir {
42namespace memref {
43class AllocaScopeOp;
44} // namespace memref
45} // namespace mlir
46namespace mlir {
47namespace memref {
48class AllocaScopeReturnOp;
49} // namespace memref
50} // namespace mlir
51namespace mlir {
52namespace memref {
53class CastOp;
54} // namespace memref
55} // namespace mlir
56namespace mlir {
57namespace memref {
58class CollapseShapeOp;
59} // namespace memref
60} // namespace mlir
61namespace mlir {
62namespace memref {
63class DeallocOp;
64} // namespace memref
65} // namespace mlir
66namespace mlir {
67namespace memref {
68class DimOp;
69} // namespace memref
70} // namespace mlir
71namespace mlir {
72namespace memref {
73class DmaStartOp;
74} // namespace memref
75} // namespace mlir
76namespace mlir {
77namespace memref {
78class DmaWaitOp;
79} // namespace memref
80} // namespace mlir
81namespace mlir {
82namespace memref {
83class ExpandShapeOp;
84} // namespace memref
85} // namespace mlir
86namespace mlir {
87namespace memref {
88class GetGlobalOp;
89} // namespace memref
90} // namespace mlir
91namespace mlir {
92namespace memref {
93class GlobalOp;
94} // namespace memref
95} // namespace mlir
96namespace mlir {
97namespace memref {
98class PrefetchOp;
99} // namespace memref
100} // namespace mlir
101namespace mlir {
102namespace memref {
103class RankOp;
104} // namespace memref
105} // namespace mlir
106namespace mlir {
107namespace memref {
108class ReinterpretCastOp;
109} // namespace memref
110} // namespace mlir
111namespace mlir {
112namespace memref {
113class ReshapeOp;
114} // namespace memref
115} // namespace mlir
116namespace mlir {
117namespace memref {
118class StoreOp;
119} // namespace memref
120} // namespace mlir
121namespace mlir {
122namespace memref {
123class TransposeOp;
124} // namespace memref
125} // namespace mlir
126namespace mlir {
127namespace memref {
128class ViewOp;
129} // namespace memref
130} // namespace mlir
131namespace mlir {
132namespace memref {
133class SubViewOp;
134} // namespace memref
135} // namespace mlir
136namespace mlir {
137namespace memref {
138class TensorStoreOp;
139} // namespace memref
140} // namespace mlir
141#endif
142
143#ifdef GET_OP_CLASSES
144#undef GET_OP_CLASSES
145
146
147//===----------------------------------------------------------------------===//
148// Local Utility Method Definitions
149//===----------------------------------------------------------------------===//
150
151namespace mlir {
152namespace memref {
153
154//===----------------------------------------------------------------------===//
155// ::mlir::memref::AssumeAlignmentOp declarations
156//===----------------------------------------------------------------------===//
157
// Operand adaptor for memref.assume_alignment: uniform named access to the
// operands/attributes whether they come from an op or a raw operand list.
// (TableGen-generated declarations; regenerate rather than hand-edit.)
158class AssumeAlignmentOpAdaptor {
159public:
160 AssumeAlignmentOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
161
162 AssumeAlignmentOpAdaptor(AssumeAlignmentOp &op);
163
164 ::mlir::ValueRange getOperands();
165 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
166 ::mlir::ValueRange getODSOperands(unsigned index);
167 ::mlir::Value memref();
168 ::mlir::DictionaryAttr getAttributes();
169 ::mlir::IntegerAttr alignmentAttr();
170 uint32_t alignment();
171 ::mlir::LogicalResult verify(::mlir::Location loc);
172private:
173 ::mlir::ValueRange odsOperands;
174 ::mlir::DictionaryAttr odsAttrs;
175 ::mlir::RegionRange odsRegions;
176};
// Declaration of the memref.assume_alignment op (zero results, one operand,
// single `alignment` attribute). TableGen-generated; regenerate to change.
177class AssumeAlignmentOp : public ::mlir::Op<AssumeAlignmentOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::OneOperand> {
178public:
179 using Op::Op;
180 using Op::print;
181 using Adaptor = AssumeAlignmentOpAdaptor;
182public:
183 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
184 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("alignment")};
185 return ::llvm::makeArrayRef(attrNames);
186 }
187
188 ::mlir::StringAttr alignmentAttrName() {
189 return getAttributeNameForIndex(0);
190 }
191
192 static ::mlir::StringAttr alignmentAttrName(::mlir::OperationName name) {
193 return getAttributeNameForIndex(name, 0);
194 }
195
196 static constexpr ::llvm::StringLiteral getOperationName() {
197 return ::llvm::StringLiteral("memref.assume_alignment");
198 }
199
// Accessors, builders, and hooks (definitions live in MemRefOps.cpp.inc).
200 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
201 ::mlir::Operation::operand_range getODSOperands(unsigned index);
202 ::mlir::Value memref();
203 ::mlir::MutableOperandRange memrefMutable();
204 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
205 ::mlir::Operation::result_range getODSResults(unsigned index);
206 ::mlir::IntegerAttr alignmentAttr();
207 uint32_t alignment();
208 void alignmentAttr(::mlir::IntegerAttr attr);
209 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value memref, ::mlir::IntegerAttr alignment);
210 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value memref, ::mlir::IntegerAttr alignment);
211 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value memref, uint32_t alignment);
212 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value memref, uint32_t alignment);
213 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
214 ::mlir::LogicalResult verify();
215 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
216 void print(::mlir::OpAsmPrinter &_odsPrinter);
217private:
218 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
219 return getAttributeNameForIndex((*this)->getName(), index);
220 }
221
222 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
223 assert(index < 1 && "invalid attribute index")(static_cast <bool> (index < 1 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 1 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 223, __extension__ __PRETTY_FUNCTION__))
;
224 return name.getRegisteredInfo()->getAttributeNames()[index];
225 }
226
227public:
228};
229} // namespace memref
230} // namespace mlir
231DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::AssumeAlignmentOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::AssumeAlignmentOp>(); } }
232
233namespace mlir {
234namespace memref {
235
236//===----------------------------------------------------------------------===//
237// ::mlir::memref::AtomicRMWOp declarations
238//===----------------------------------------------------------------------===//
239
// Operand adaptor for memref.atomic_rmw: named access to value/memref/indices
// operands and the `kind` attribute. (TableGen-generated declarations.)
240class AtomicRMWOpAdaptor {
241public:
242 AtomicRMWOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
243
244 AtomicRMWOpAdaptor(AtomicRMWOp &op);
245
246 ::mlir::ValueRange getOperands();
247 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
248 ::mlir::ValueRange getODSOperands(unsigned index);
249 ::mlir::Value value();
250 ::mlir::Value memref();
251 ::mlir::ValueRange indices();
252 ::mlir::DictionaryAttr getAttributes();
253 ::mlir::arith::AtomicRMWKindAttr kindAttr();
254 ::mlir::arith::AtomicRMWKind kind();
255 ::mlir::LogicalResult verify(::mlir::Location loc);
256private:
257 ::mlir::ValueRange odsOperands;
258 ::mlir::DictionaryAttr odsAttrs;
259 ::mlir::RegionRange odsRegions;
260};
// Declaration of the memref.atomic_rmw op (one result, >= 2 operands:
// value, memref, then indices; single `kind` attribute selecting the
// reduction). TableGen-generated; regenerate to change.
261class AtomicRMWOp : public ::mlir::Op<AtomicRMWOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::Type>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<2>::Impl> {
262public:
263 using Op::Op;
264 using Op::print;
265 using Adaptor = AtomicRMWOpAdaptor;
266public:
267 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
268 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("kind")};
269 return ::llvm::makeArrayRef(attrNames);
270 }
271
272 ::mlir::StringAttr kindAttrName() {
273 return getAttributeNameForIndex(0);
274 }
275
276 static ::mlir::StringAttr kindAttrName(::mlir::OperationName name) {
277 return getAttributeNameForIndex(name, 0);
278 }
279
280 static constexpr ::llvm::StringLiteral getOperationName() {
281 return ::llvm::StringLiteral("memref.atomic_rmw");
282 }
283
// Accessors, builders, and hooks (definitions live in MemRefOps.cpp.inc).
284 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
285 ::mlir::Operation::operand_range getODSOperands(unsigned index);
286 ::mlir::Value value();
287 ::mlir::Value memref();
288 ::mlir::Operation::operand_range indices();
289 ::mlir::MutableOperandRange valueMutable();
290 ::mlir::MutableOperandRange memrefMutable();
291 ::mlir::MutableOperandRange indicesMutable();
292 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
293 ::mlir::Operation::result_range getODSResults(unsigned index);
294 ::mlir::Value result();
295 ::mlir::arith::AtomicRMWKindAttr kindAttr();
296 ::mlir::arith::AtomicRMWKind kind();
297 void kindAttr(::mlir::arith::AtomicRMWKindAttr attr);
298 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::arith::AtomicRMWKindAttr kind, ::mlir::Value value, ::mlir::Value memref, ::mlir::ValueRange indices);
299 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::arith::AtomicRMWKindAttr kind, ::mlir::Value value, ::mlir::Value memref, ::mlir::ValueRange indices);
300 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::arith::AtomicRMWKind kind, ::mlir::Value value, ::mlir::Value memref, ::mlir::ValueRange indices);
301 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::arith::AtomicRMWKind kind, ::mlir::Value value, ::mlir::Value memref, ::mlir::ValueRange indices);
302 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
303 ::mlir::LogicalResult verify();
304 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
305 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
306 void print(::mlir::OpAsmPrinter &_odsPrinter);
307private:
308 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
309 return getAttributeNameForIndex((*this)->getName(), index);
310 }
311
312 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
313 assert(index < 1 && "invalid attribute index")(static_cast <bool> (index < 1 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 1 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 313, __extension__ __PRETTY_FUNCTION__))
;
314 return name.getRegisteredInfo()->getAttributeNames()[index];
315 }
316
317public:
// Extra class declaration from ODS: convenience accessor for the memref type.
318 MemRefType getMemRefType() {
319 return memref().getType().cast<MemRefType>();
320 }
321};
322} // namespace memref
323} // namespace mlir
324DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::AtomicRMWOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::AtomicRMWOp>(); } }
325
326namespace mlir {
327namespace memref {
328
329//===----------------------------------------------------------------------===//
330// ::mlir::memref::CopyOp declarations
331//===----------------------------------------------------------------------===//
332
// Operand adaptor for memref.copy: named access to the source/target
// operands. (TableGen-generated declarations.)
333class CopyOpAdaptor {
334public:
335 CopyOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
336
337 CopyOpAdaptor(CopyOp &op);
338
339 ::mlir::ValueRange getOperands();
340 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
341 ::mlir::ValueRange getODSOperands(unsigned index);
342 ::mlir::Value source();
343 ::mlir::Value target();
344 ::mlir::DictionaryAttr getAttributes();
345 ::mlir::LogicalResult verify(::mlir::Location loc);
346private:
347 ::mlir::ValueRange odsOperands;
348 ::mlir::DictionaryAttr odsAttrs;
349 ::mlir::RegionRange odsRegions;
350};
// Declaration of the memref.copy op (two operands, no results, implements
// CopyOpInterface and MemoryEffectOpInterface). TableGen-generated.
351class CopyOp : public ::mlir::Op<CopyOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::NOperands<2>::Impl, ::mlir::CopyOpInterface::Trait, ::mlir::OpTrait::SameOperandsElementType, ::mlir::OpTrait::SameOperandsShape, ::mlir::MemoryEffectOpInterface::Trait> {
352public:
353 using Op::Op;
354 using Op::print;
355 using Adaptor = CopyOpAdaptor;
356public:
357 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
358 return {};
359 }
360
361 static constexpr ::llvm::StringLiteral getOperationName() {
362 return ::llvm::StringLiteral("memref.copy");
363 }
364
// Accessors, builders, and hooks (definitions live in MemRefOps.cpp.inc).
365 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
366 ::mlir::Operation::operand_range getODSOperands(unsigned index);
367 ::mlir::Value source();
368 ::mlir::Value target();
369 ::mlir::MutableOperandRange sourceMutable();
370 ::mlir::MutableOperandRange targetMutable();
371 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
372 ::mlir::Operation::result_range getODSResults(unsigned index);
373 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value source, ::mlir::Value target);
374 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value source, ::mlir::Value target);
375 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
376 ::mlir::LogicalResult verify();
377 static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
378 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
379 void print(::mlir::OpAsmPrinter &_odsPrinter);
380 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
381public:
// CopyOpInterface implementation from the ODS extraClassDeclaration.
382 Value getSource() { return source();}
383 Value getTarget() { return target(); }
384};
385} // namespace memref
386} // namespace mlir
387DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::CopyOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::CopyOp>(); } }
388
389namespace mlir {
390namespace memref {
391
392//===----------------------------------------------------------------------===//
393// ::mlir::memref::LoadOp declarations
394//===----------------------------------------------------------------------===//
395
// Operand adaptor for memref.load: named access to the memref and index
// operands. (TableGen-generated declarations.)
396class LoadOpAdaptor {
397public:
398 LoadOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
399
400 LoadOpAdaptor(LoadOp &op);
401
402 ::mlir::ValueRange getOperands();
403 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
404 ::mlir::ValueRange getODSOperands(unsigned index);
405 ::mlir::Value memref();
406 ::mlir::ValueRange indices();
407 ::mlir::DictionaryAttr getAttributes();
408 ::mlir::LogicalResult verify(::mlir::Location loc);
409private:
410 ::mlir::ValueRange odsOperands;
411 ::mlir::DictionaryAttr odsAttrs;
412 ::mlir::RegionRange odsRegions;
413};
// Declaration of the memref.load op (one result; memref operand followed by
// indices; MemRefsNormalizable + MemoryEffectOpInterface). TableGen-generated.
414class LoadOp : public ::mlir::Op<LoadOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::Type>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<1>::Impl, ::mlir::OpTrait::MemRefsNormalizable, ::mlir::MemoryEffectOpInterface::Trait> {
415public:
416 using Op::Op;
417 using Op::print;
418 using Adaptor = LoadOpAdaptor;
419public:
420 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
421 return {};
422 }
423
424 static constexpr ::llvm::StringLiteral getOperationName() {
425 return ::llvm::StringLiteral("memref.load");
426 }
427
// Accessors, builders, and hooks (definitions live in MemRefOps.cpp.inc).
428 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
429 ::mlir::Operation::operand_range getODSOperands(unsigned index);
430 ::mlir::Value memref();
431 ::mlir::Operation::operand_range indices();
432 ::mlir::MutableOperandRange memrefMutable();
433 ::mlir::MutableOperandRange indicesMutable();
434 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
435 ::mlir::Operation::result_range getODSResults(unsigned index);
436 ::mlir::Value result();
437 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value memref, ValueRange indices = {});
438 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::Value memref, ::mlir::ValueRange indices);
439 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value memref, ::mlir::ValueRange indices);
440 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
441 ::mlir::LogicalResult verify();
442 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
443 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
444 void print(::mlir::OpAsmPrinter &_odsPrinter);
445 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
446public:
// Extra class declaration from ODS: operand 0 is the memref, the rest are
// the subscript indices.
447 Value getMemRef() { return getOperand(0); }
448 void setMemRef(Value value) { setOperand(0, value); }
449 MemRefType getMemRefType() {
450 return getMemRef().getType().cast<MemRefType>();
451 }
452
453 operand_range getIndices() { return {operand_begin() + 1, operand_end()}; }
454};
455} // namespace memref
456} // namespace mlir
457DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::LoadOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::LoadOp>(); } }
458
459namespace mlir {
460namespace memref {
461
462//===----------------------------------------------------------------------===//
463// ::mlir::memref::AllocOp declarations
464//===----------------------------------------------------------------------===//
465
// Operand adaptor for memref.alloc: named access to the dynamic sizes and
// symbol operands plus the optional `alignment` attribute. (TableGen-generated.)
466class AllocOpAdaptor {
467public:
468 AllocOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
469
470 AllocOpAdaptor(AllocOp &op);
471
472 ::mlir::ValueRange getOperands();
473 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
474 ::mlir::ValueRange getODSOperands(unsigned index);
475 ::mlir::ValueRange dynamicSizes();
476 ::mlir::ValueRange symbolOperands();
477 ::mlir::DictionaryAttr getAttributes();
478 ::mlir::IntegerAttr alignmentAttr();
479 ::llvm::Optional<uint64_t> alignment();
480 ::mlir::LogicalResult verify(::mlir::Location loc);
481private:
482 ::mlir::ValueRange odsOperands;
483 ::mlir::DictionaryAttr odsAttrs;
484 ::mlir::RegionRange odsRegions;
485};
486class AllocOp : public ::mlir::Op<AllocOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::MemRefType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::VariadicOperands, ::mlir::OpTrait::AttrSizedOperandSegments, ::mlir::MemoryEffectOpInterface::Trait> {
487public:
488 using Op::Op;
489 using Op::print;
490 using Adaptor = AllocOpAdaptor;
491public:
492 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
493 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("alignment"), ::llvm::StringRef("operand_segment_sizes")};
494 return ::llvm::makeArrayRef(attrNames);
495 }
496
497 ::mlir::StringAttr alignmentAttrName() {
498 return getAttributeNameForIndex(0);
499 }
500
501 static ::mlir::StringAttr alignmentAttrName(::mlir::OperationName name) {
502 return getAttributeNameForIndex(name, 0);
503 }
504
505 ::mlir::StringAttr operand_segment_sizesAttrName() {
506 return getAttributeNameForIndex(1);
507 }
508
509 static ::mlir::StringAttr operand_segment_sizesAttrName(::mlir::OperationName name) {
510 return getAttributeNameForIndex(name, 1);
511 }
512
513 static constexpr ::llvm::StringLiteral getOperationName() {
514 return ::llvm::StringLiteral("memref.alloc");
515 }
516
517 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
518 ::mlir::Operation::operand_range getODSOperands(unsigned index);
519 ::mlir::Operation::operand_range dynamicSizes();
520 ::mlir::Operation::operand_range symbolOperands();
521 ::mlir::MutableOperandRange dynamicSizesMutable();
522 ::mlir::MutableOperandRange symbolOperandsMutable();
523 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
524 ::mlir::Operation::result_range getODSResults(unsigned index);
525 ::mlir::Value memref();
526 ::mlir::IntegerAttr alignmentAttr();
527 ::llvm::Optional<uint64_t> alignment();
528 void alignmentAttr(::mlir::IntegerAttr attr);
529 ::mlir::Attribute removeAlignmentAttr();
530 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType memrefType, IntegerAttr alignment = IntegerAttr());
531 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType memrefType, ValueRange dynamicSizes, IntegerAttr alignment = IntegerAttr());
532 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType memrefType, ValueRange dynamicSizes, ValueRange symbolOperands, IntegerAttr alignment = {});
533 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type memref, ::mlir::ValueRange dynamicSizes, ::mlir::ValueRange symbolOperands, /*optional*/::mlir::IntegerAttr alignment);
534 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange dynamicSizes, ::mlir::ValueRange symbolOperands, /*optional*/::mlir::IntegerAttr alignment);
535 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
536 ::mlir::LogicalResult verify();
537 static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
538 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
539 void print(::mlir::OpAsmPrinter &_odsPrinter);
540 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
541private:
542 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
543 return getAttributeNameForIndex((*this)->getName(), index);
544 }
545
546 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
547 assert(index < 2 && "invalid attribute index")(static_cast <bool> (index < 2 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 2 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 547, __extension__ __PRETTY_FUNCTION__))
;
548 return name.getRegisteredInfo()->getAttributeNames()[index];
549 }
550
551public:
552 static StringRef getAlignmentAttrName() { return "alignment"; }
553
554 MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
555
556 /// Returns the dynamic sizes for this alloc operation if specified.
557 operand_range getDynamicSizes() { return dynamicSizes(); }
558};
559} // namespace memref
560} // namespace mlir
561DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::AllocOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::AllocOp>(); } }
562
563namespace mlir {
564namespace memref {
565
566//===----------------------------------------------------------------------===//
567// ::mlir::memref::AllocaOp declarations
568//===----------------------------------------------------------------------===//
569
570class AllocaOpAdaptor {
571public:
572 AllocaOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
573
574 AllocaOpAdaptor(AllocaOp &op);
575
576 ::mlir::ValueRange getOperands();
577 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
578 ::mlir::ValueRange getODSOperands(unsigned index);
579 ::mlir::ValueRange dynamicSizes();
580 ::mlir::ValueRange symbolOperands();
581 ::mlir::DictionaryAttr getAttributes();
582 ::mlir::IntegerAttr alignmentAttr();
583 ::llvm::Optional<uint64_t> alignment();
584 ::mlir::LogicalResult verify(::mlir::Location loc);
585private:
586 ::mlir::ValueRange odsOperands;
587 ::mlir::DictionaryAttr odsAttrs;
588 ::mlir::RegionRange odsRegions;
589};
590class AllocaOp : public ::mlir::Op<AllocaOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::MemRefType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::VariadicOperands, ::mlir::OpTrait::AttrSizedOperandSegments, ::mlir::MemoryEffectOpInterface::Trait> {
591public:
592 using Op::Op;
593 using Op::print;
594 using Adaptor = AllocaOpAdaptor;
595public:
596 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
597 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("alignment"), ::llvm::StringRef("operand_segment_sizes")};
598 return ::llvm::makeArrayRef(attrNames);
599 }
600
601 ::mlir::StringAttr alignmentAttrName() {
602 return getAttributeNameForIndex(0);
603 }
604
605 static ::mlir::StringAttr alignmentAttrName(::mlir::OperationName name) {
606 return getAttributeNameForIndex(name, 0);
607 }
608
609 ::mlir::StringAttr operand_segment_sizesAttrName() {
610 return getAttributeNameForIndex(1);
611 }
612
613 static ::mlir::StringAttr operand_segment_sizesAttrName(::mlir::OperationName name) {
614 return getAttributeNameForIndex(name, 1);
615 }
616
617 static constexpr ::llvm::StringLiteral getOperationName() {
618 return ::llvm::StringLiteral("memref.alloca");
619 }
620
621 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
622 ::mlir::Operation::operand_range getODSOperands(unsigned index);
623 ::mlir::Operation::operand_range dynamicSizes();
624 ::mlir::Operation::operand_range symbolOperands();
625 ::mlir::MutableOperandRange dynamicSizesMutable();
626 ::mlir::MutableOperandRange symbolOperandsMutable();
627 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
628 ::mlir::Operation::result_range getODSResults(unsigned index);
629 ::mlir::Value memref();
630 ::mlir::IntegerAttr alignmentAttr();
631 ::llvm::Optional<uint64_t> alignment();
632 void alignmentAttr(::mlir::IntegerAttr attr);
633 ::mlir::Attribute removeAlignmentAttr();
634 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType memrefType, IntegerAttr alignment = IntegerAttr());
635 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType memrefType, ValueRange dynamicSizes, IntegerAttr alignment = IntegerAttr());
636 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType memrefType, ValueRange dynamicSizes, ValueRange symbolOperands, IntegerAttr alignment = {});
637 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type memref, ::mlir::ValueRange dynamicSizes, ::mlir::ValueRange symbolOperands, /*optional*/::mlir::IntegerAttr alignment);
638 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange dynamicSizes, ::mlir::ValueRange symbolOperands, /*optional*/::mlir::IntegerAttr alignment);
639 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
640 ::mlir::LogicalResult verify();
641 static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
642 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
643 void print(::mlir::OpAsmPrinter &_odsPrinter);
644 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
645private:
646 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
647 return getAttributeNameForIndex((*this)->getName(), index);
648 }
649
650 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
651 assert(index < 2 && "invalid attribute index")(static_cast <bool> (index < 2 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 2 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 651, __extension__ __PRETTY_FUNCTION__))
;
652 return name.getRegisteredInfo()->getAttributeNames()[index];
653 }
654
655public:
656 static StringRef getAlignmentAttrName() { return "alignment"; }
657
658 MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
659
660 /// Returns the dynamic sizes for this alloc operation if specified.
661 operand_range getDynamicSizes() { return dynamicSizes(); }
662};
663} // namespace memref
664} // namespace mlir
665DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::AllocaOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::AllocaOp>(); } }
666
667namespace mlir {
668namespace memref {
669
670//===----------------------------------------------------------------------===//
671// ::mlir::memref::AllocaScopeOp declarations
672//===----------------------------------------------------------------------===//
673
674class AllocaScopeOpAdaptor {
675public:
676 AllocaScopeOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
677
678 AllocaScopeOpAdaptor(AllocaScopeOp &op);
679
680 ::mlir::ValueRange getOperands();
681 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
682 ::mlir::ValueRange getODSOperands(unsigned index);
683 ::mlir::DictionaryAttr getAttributes();
684 ::mlir::RegionRange getRegions();
685 ::mlir::Region &bodyRegion();
686 ::mlir::LogicalResult verify(::mlir::Location loc);
687private:
688 ::mlir::ValueRange odsOperands;
689 ::mlir::DictionaryAttr odsAttrs;
690 ::mlir::RegionRange odsRegions;
691};
692class AllocaScopeOp : public ::mlir::Op<AllocaScopeOp, ::mlir::OpTrait::OneRegion, ::mlir::OpTrait::VariadicResults, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::ZeroOperands, ::mlir::OpTrait::AutomaticAllocationScope, ::mlir::RegionBranchOpInterface::Trait, ::mlir::OpTrait::SingleBlockImplicitTerminator<AllocaScopeReturnOp>::Impl, ::mlir::OpTrait::HasRecursiveSideEffects, ::mlir::OpTrait::NoRegionArguments> {
693public:
694 using Op::Op;
695 using Op::print;
696 using Adaptor = AllocaScopeOpAdaptor;
697public:
698 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
699 return {};
700 }
701
702 static constexpr ::llvm::StringLiteral getOperationName() {
703 return ::llvm::StringLiteral("memref.alloca_scope");
704 }
705
706 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
707 ::mlir::Operation::operand_range getODSOperands(unsigned index);
708 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
709 ::mlir::Operation::result_range getODSResults(unsigned index);
710 ::mlir::Operation::result_range results();
711 ::mlir::Region &bodyRegion();
712 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange results);
713 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
714 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
715 void print(::mlir::OpAsmPrinter &p);
716 ::mlir::LogicalResult verify();
717 void getSuccessorRegions(::mlir::Optional<unsigned> index, ::mlir::ArrayRef<::mlir::Attribute> operands, ::mlir::SmallVectorImpl<::mlir::RegionSuccessor> &regions);
718public:
719};
720} // namespace memref
721} // namespace mlir
722DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::AllocaScopeOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::AllocaScopeOp>(); } }
723
724namespace mlir {
725namespace memref {
726
727//===----------------------------------------------------------------------===//
728// ::mlir::memref::AllocaScopeReturnOp declarations
729//===----------------------------------------------------------------------===//
730
731class AllocaScopeReturnOpAdaptor {
732public:
733 AllocaScopeReturnOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
734
735 AllocaScopeReturnOpAdaptor(AllocaScopeReturnOp &op);
736
737 ::mlir::ValueRange getOperands();
738 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
739 ::mlir::ValueRange getODSOperands(unsigned index);
740 ::mlir::ValueRange results();
741 ::mlir::DictionaryAttr getAttributes();
742 ::mlir::LogicalResult verify(::mlir::Location loc);
743private:
744 ::mlir::ValueRange odsOperands;
745 ::mlir::DictionaryAttr odsAttrs;
746 ::mlir::RegionRange odsRegions;
747};
748class AllocaScopeReturnOp : public ::mlir::Op<AllocaScopeReturnOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::VariadicOperands, ::mlir::OpTrait::HasParent<AllocaScopeOp>::Impl, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::OpTrait::ReturnLike, ::mlir::OpTrait::IsTerminator> {
749public:
750 using Op::Op;
751 using Op::print;
752 using Adaptor = AllocaScopeReturnOpAdaptor;
753public:
754 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
755 return {};
756 }
757
758 static constexpr ::llvm::StringLiteral getOperationName() {
759 return ::llvm::StringLiteral("memref.alloca_scope.return");
760 }
761
762 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
763 ::mlir::Operation::operand_range getODSOperands(unsigned index);
764 ::mlir::Operation::operand_range results();
765 ::mlir::MutableOperandRange resultsMutable();
766 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
767 ::mlir::Operation::result_range getODSResults(unsigned index);
768 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState);
769 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::ValueRange results);
770 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
771 ::mlir::LogicalResult verify();
772 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
773 void print(::mlir::OpAsmPrinter &_odsPrinter);
774 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
775public:
776};
777} // namespace memref
778} // namespace mlir
779DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::AllocaScopeReturnOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::AllocaScopeReturnOp>(); } }
780
781namespace mlir {
782namespace memref {
783
784//===----------------------------------------------------------------------===//
785// ::mlir::memref::CastOp declarations
786//===----------------------------------------------------------------------===//
787
788class CastOpAdaptor {
789public:
790 CastOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
791
792 CastOpAdaptor(CastOp &op);
793
794 ::mlir::ValueRange getOperands();
795 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
796 ::mlir::ValueRange getODSOperands(unsigned index);
797 ::mlir::Value source();
798 ::mlir::DictionaryAttr getAttributes();
799 ::mlir::LogicalResult verify(::mlir::Location loc);
800private:
801 ::mlir::ValueRange odsOperands;
802 ::mlir::DictionaryAttr odsAttrs;
803 ::mlir::RegionRange odsRegions;
804};
805class CastOp : public ::mlir::Op<CastOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::Type>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::OneOperand, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::OpTrait::SameOperandsAndResultShape, ::mlir::CastOpInterface::Trait, ::mlir::ViewLikeOpInterface::Trait, ::mlir::OpTrait::MemRefsNormalizable> {
806public:
807 using Op::Op;
808 using Op::print;
809 using Adaptor = CastOpAdaptor;
810public:
811 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
812 return {};
813 }
814
815 static constexpr ::llvm::StringLiteral getOperationName() {
816 return ::llvm::StringLiteral("memref.cast");
817 }
818
819 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
820 ::mlir::Operation::operand_range getODSOperands(unsigned index);
821 ::mlir::Value source();
822 ::mlir::MutableOperandRange sourceMutable();
823 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
824 ::mlir::Operation::result_range getODSResults(unsigned index);
825 ::mlir::Value dest();
826 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value source, Type destType);
827 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type dest, ::mlir::Value source);
828 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value source);
829 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
830 ::mlir::LogicalResult verify();
831 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
832 static bool areCastCompatible(::mlir::TypeRange inputs, ::mlir::TypeRange outputs);
833 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
834 void print(::mlir::OpAsmPrinter &_odsPrinter);
835 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
836public:
837 /// Fold the given CastOp into consumer op.
838 static bool canFoldIntoConsumerOp(CastOp castOp);
839
840 Value getViewSource() { return source(); }
841};
842} // namespace memref
843} // namespace mlir
844DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::CastOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::CastOp>(); } }
845
846namespace mlir {
847namespace memref {
848
849//===----------------------------------------------------------------------===//
850// ::mlir::memref::CollapseShapeOp declarations
851//===----------------------------------------------------------------------===//
852
853class CollapseShapeOpAdaptor {
854public:
855 CollapseShapeOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
856
857 CollapseShapeOpAdaptor(CollapseShapeOp &op);
858
859 ::mlir::ValueRange getOperands();
860 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
861 ::mlir::ValueRange getODSOperands(unsigned index);
862 ::mlir::Value src();
863 ::mlir::DictionaryAttr getAttributes();
864 ::mlir::ArrayAttr reassociationAttr();
865 ::mlir::ArrayAttr reassociation();
866 ::mlir::LogicalResult verify(::mlir::Location loc);
867private:
868 ::mlir::ValueRange odsOperands;
869 ::mlir::DictionaryAttr odsAttrs;
870 ::mlir::RegionRange odsRegions;
871};
872class CollapseShapeOp : public ::mlir::Op<CollapseShapeOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::Type>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::OneOperand, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::ViewLikeOpInterface::Trait> {
873public:
874 using Op::Op;
875 using Op::print;
876 using Adaptor = CollapseShapeOpAdaptor;
877public:
878 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
879 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("reassociation")};
880 return ::llvm::makeArrayRef(attrNames);
881 }
882
883 ::mlir::StringAttr reassociationAttrName() {
884 return getAttributeNameForIndex(0);
885 }
886
887 static ::mlir::StringAttr reassociationAttrName(::mlir::OperationName name) {
888 return getAttributeNameForIndex(name, 0);
889 }
890
891 static constexpr ::llvm::StringLiteral getOperationName() {
892 return ::llvm::StringLiteral("memref.collapse_shape");
893 }
894
895 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
896 ::mlir::Operation::operand_range getODSOperands(unsigned index);
897 ::mlir::Value src();
898 ::mlir::MutableOperandRange srcMutable();
899 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
900 ::mlir::Operation::result_range getODSResults(unsigned index);
901 ::mlir::Value result();
902 ::mlir::ArrayAttr reassociationAttr();
903 ::mlir::ArrayAttr reassociation();
904 void reassociationAttr(::mlir::ArrayAttr attr);
905 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value src, ArrayRef<ReassociationIndices> reassociation, ArrayRef<NamedAttribute> attrs = {});
906 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value src, ArrayRef<ReassociationExprs> reassociation, ArrayRef<NamedAttribute> attrs = {});
907 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Type resultType, Value src, ArrayRef<ReassociationIndices> reassociation, ArrayRef<NamedAttribute> attrs = {});
908 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Type resultType, Value src, ArrayRef<ReassociationExprs> reassociation, ArrayRef<NamedAttribute> attrs = {});
909 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::Value src, ::mlir::ArrayAttr reassociation);
910 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value src, ::mlir::ArrayAttr reassociation);
911 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
912 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
913 void print(::mlir::OpAsmPrinter &p);
914 ::mlir::LogicalResult verify();
915 static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
916 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
917 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
918private:
919 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
920 return getAttributeNameForIndex((*this)->getName(), index);
921 }
922
923 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
924 assert(index < 1 && "invalid attribute index")(static_cast <bool> (index < 1 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 1 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 924, __extension__ __PRETTY_FUNCTION__))
;
925 return name.getRegisteredInfo()->getAttributeNames()[index];
926 }
927
928public:
929 SmallVector<AffineMap, 4> getReassociationMaps();
930 SmallVector<ReassociationExprs, 4> getReassociationExprs();
931 SmallVector<ReassociationIndices, 4> getReassociationIndices() {
932 SmallVector<ReassociationIndices, 4> reassociationIndices;
933 for (auto attr : reassociation())
934 reassociationIndices.push_back(llvm::to_vector<2>(
935 llvm::map_range(attr.cast<ArrayAttr>(), [&](Attribute indexAttr) {
936 return indexAttr.cast<IntegerAttr>().getInt();
937 })));
938 return reassociationIndices;
939 };
940 MemRefType getSrcType() { return src().getType().cast<MemRefType>(); }
941 MemRefType getResultType() { return result().getType().cast<MemRefType>(); }
942 Value getViewSource() { return src(); }
943};
944} // namespace memref
945} // namespace mlir
946DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::CollapseShapeOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::CollapseShapeOp>(); } }
947
948namespace mlir {
949namespace memref {
950
951//===----------------------------------------------------------------------===//
952// ::mlir::memref::DeallocOp declarations
953//===----------------------------------------------------------------------===//
954
955class DeallocOpAdaptor {
956public:
957 DeallocOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
958
959 DeallocOpAdaptor(DeallocOp &op);
960
961 ::mlir::ValueRange getOperands();
962 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
963 ::mlir::ValueRange getODSOperands(unsigned index);
964 ::mlir::Value memref();
965 ::mlir::DictionaryAttr getAttributes();
966 ::mlir::LogicalResult verify(::mlir::Location loc);
967private:
968 ::mlir::ValueRange odsOperands;
969 ::mlir::DictionaryAttr odsAttrs;
970 ::mlir::RegionRange odsRegions;
971};
972class DeallocOp : public ::mlir::Op<DeallocOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::OneOperand, ::mlir::OpTrait::MemRefsNormalizable, ::mlir::MemoryEffectOpInterface::Trait> {
973public:
974 using Op::Op;
975 using Op::print;
976 using Adaptor = DeallocOpAdaptor;
977public:
978 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
979 return {};
980 }
981
982 static constexpr ::llvm::StringLiteral getOperationName() {
983 return ::llvm::StringLiteral("memref.dealloc");
984 }
985
986 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
987 ::mlir::Operation::operand_range getODSOperands(unsigned index);
988 ::mlir::Value memref();
989 ::mlir::MutableOperandRange memrefMutable();
990 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
991 ::mlir::Operation::result_range getODSResults(unsigned index);
992 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value memref);
993 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value memref);
994 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
995 ::mlir::LogicalResult verify();
996 ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands, ::llvm::SmallVectorImpl<::mlir::OpFoldResult> &results);
997 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
998 void print(::mlir::OpAsmPrinter &_odsPrinter);
999 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
1000public:
1001};
1002} // namespace memref
1003} // namespace mlir
1004DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::DeallocOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::DeallocOp>(); } }
1005
1006namespace mlir {
1007namespace memref {
1008
1009//===----------------------------------------------------------------------===//
1010// ::mlir::memref::DimOp declarations
1011//===----------------------------------------------------------------------===//
1012
1013class DimOpAdaptor {
1014public:
1015 DimOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
1016
1017 DimOpAdaptor(DimOp &op);
1018
1019 ::mlir::ValueRange getOperands();
1020 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1021 ::mlir::ValueRange getODSOperands(unsigned index);
1022 ::mlir::Value source();
1023 ::mlir::Value index();
1024 ::mlir::DictionaryAttr getAttributes();
1025 ::mlir::LogicalResult verify(::mlir::Location loc);
1026private:
1027 ::mlir::ValueRange odsOperands;
1028 ::mlir::DictionaryAttr odsAttrs;
1029 ::mlir::RegionRange odsRegions;
1030};
1031class DimOp : public ::mlir::Op<DimOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::IndexType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::NOperands<2>::Impl, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::OpTrait::MemRefsNormalizable> {
1032public:
1033 using Op::Op;
1034 using Op::print;
1035 using Adaptor = DimOpAdaptor;
1036public:
1037 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1038 return {};
1039 }
1040
1041 static constexpr ::llvm::StringLiteral getOperationName() {
1042 return ::llvm::StringLiteral("memref.dim");
1043 }
1044
1045 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1046 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1047 ::mlir::Value source();
1048 ::mlir::Value index();
1049 ::mlir::MutableOperandRange sourceMutable();
1050 ::mlir::MutableOperandRange indexMutable();
1051 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1052 ::mlir::Operation::result_range getODSResults(unsigned index);
1053 ::mlir::Value result();
1054 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value source, int64_t index);
1055 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value source, Value index);
1056 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::Value source, ::mlir::Value index);
1057 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value source, ::mlir::Value index);
1058 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1059 ::mlir::LogicalResult verify();
1060 static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
1061 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
1062 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1063 void print(::mlir::OpAsmPrinter &_odsPrinter);
1064 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
1065public:
1066 /// Helper function to get the index as a simple integer if it is constant.
1067 Optional<int64_t> getConstantIndex();
1068};
1069} // namespace memref
1070} // namespace mlir
1071DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::DimOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::DimOp>(); } }
1072
1073namespace mlir {
1074namespace memref {
1075
1076//===----------------------------------------------------------------------===//
1077// ::mlir::memref::DmaStartOp declarations
1078//===----------------------------------------------------------------------===//
1079
// ODS-generated adaptor for DmaStartOp: exposes the op's operands and
// attributes over a raw ValueRange/DictionaryAttr, so folding/verification
// code can run before (or without) a materialized Operation.
1080class DmaStartOpAdaptor {
1081public:
  // Wrap raw operands/attributes/regions (e.g. during folding).
1082 DmaStartOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
1083
  // Wrap an existing DmaStartOp instance.
1084 DmaStartOpAdaptor(DmaStartOp &op);
1085
1086 ::mlir::ValueRange getOperands();
1087 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1088 ::mlir::ValueRange getODSOperands(unsigned index);
1089 ::mlir::ValueRange operands();
1090 ::mlir::DictionaryAttr getAttributes();
  // Structural verification of the wrapped operand/attribute lists.
1091 ::mlir::LogicalResult verify(::mlir::Location loc);
1092private:
1093 ::mlir::ValueRange odsOperands;
1094 ::mlir::DictionaryAttr odsAttrs;
1095 ::mlir::RegionRange odsRegions;
1096};
// ODS-generated declaration for the `memref.dma_start` op (zero results,
// variadic operands). Operand layout, as established by the accessors
// below: [0] src memref, [1..] src indices (src rank), dst memref,
// dst indices (dst rank), numElements, tag memref, tag indices (tag rank),
// and optionally a trailing {stride, elementsPerStride} pair.
1097class DmaStartOp : public ::mlir::Op<DmaStartOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::VariadicOperands> {
1098public:
1099 using Op::Op;
1100 using Op::print;
1101 using Adaptor = DmaStartOpAdaptor;
1102public:
1103 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1104 return {};
1105 }
1106
1107 static constexpr ::llvm::StringLiteral getOperationName() {
1108 return ::llvm::StringLiteral("memref.dma_start");
1109 }
1110
1111 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1112 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1113 ::mlir::Operation::operand_range operands();
1114 ::mlir::MutableOperandRange operandsMutable();
1115 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1116 ::mlir::Operation::result_range getODSResults(unsigned index);
1117 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value srcMemRef, ValueRange srcIndices, Value destMemRef, ValueRange destIndices, Value numElements, Value tagMemRef, ValueRange tagIndices, Value stride = {}, Value elementsPerStride = {});
1118 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::ValueRange operands);
1119 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1120 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1121 void print(::mlir::OpAsmPrinter &p);
1122 ::mlir::LogicalResult verify();
1123 ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands, ::llvm::SmallVectorImpl<::mlir::OpFoldResult> &results);
1124public:
1125 // Returns the source MemRefType for this DMA operation.
1126 Value getSrcMemRef() { return getOperand(0); }
1127 // Returns the rank (number of indices) of the source MemRefType.
1128 unsigned getSrcMemRefRank() {
1129 return getSrcMemRef().getType().cast<MemRefType>().getRank();
1130 }
1131 // Returns the source memref indices for this DMA operation.
1132 operand_range getSrcIndices() {
1133 return {(*this)->operand_begin() + 1,
1134 (*this)->operand_begin() + 1 + getSrcMemRefRank()};
1135 }
1136
1137 // Returns the destination MemRefType for this DMA operations.
1138 Value getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
1139 // Returns the rank (number of indices) of the destination MemRefType.
1140 unsigned getDstMemRefRank() {
1141 return getDstMemRef().getType().cast<MemRefType>().getRank();
1142 }
  // Numeric memory-space attribute of the source/destination memref types.
1143 unsigned getSrcMemorySpace() {
1144 return getSrcMemRef().getType().cast<MemRefType>().getMemorySpaceAsInt();
1145 }
1146 unsigned getDstMemorySpace() {
1147 return getDstMemRef().getType().cast<MemRefType>().getMemorySpaceAsInt();
1148 }
1149
1150 // Returns the destination memref indices for this DMA operation.
1151 operand_range getDstIndices() {
1152 return {(*this)->operand_begin() + 1 + getSrcMemRefRank() + 1,
1153 (*this)->operand_begin() + 1 + getSrcMemRefRank() + 1 +
1154 getDstMemRefRank()};
1155 }
1156
1157 // Returns the number of elements being transferred by this DMA operation.
1158 Value getNumElements() {
1159 return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank());
1160 }
1161
1162 // Returns the Tag MemRef for this DMA operation.
1163 Value getTagMemRef() {
1164 return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1);
1165 }
1166 // Returns the rank (number of indices) of the tag MemRefType.
1167 unsigned getTagMemRefRank() {
1168 return getTagMemRef().getType().cast<MemRefType>().getRank();
1169 }
1170
1171 // Returns the tag memref index for this DMA operation.
1172 operand_range getTagIndices() {
1173 unsigned tagIndexStartPos =
1174 1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1 + 1;
1175 return {(*this)->operand_begin() + tagIndexStartPos,
1176 (*this)->operand_begin() + tagIndexStartPos + getTagMemRefRank()};
1177 }
1178
1179 /// Returns true if this is a DMA from a faster memory space to a slower
1180 /// one.
1181 bool isDestMemorySpaceFaster() {
1182 return (getSrcMemorySpace() < getDstMemorySpace());
1183 }
1184
1185 /// Returns true if this is a DMA from a slower memory space to a faster
1186 /// one.
1187 bool isSrcMemorySpaceFaster() {
1188 // Assumes that a lower number is for a slower memory space.
1189 return (getDstMemorySpace() < getSrcMemorySpace());
1190 }
1191
1192 /// Given a DMA start operation, returns the operand position of either the
1193 /// source or destination memref depending on the one that is at the higher
1194 /// level of the memory hierarchy. Asserts failure if neither is true.
1195 unsigned getFasterMemPos() {
1196 assert(isSrcMemorySpaceFaster() || isDestMemorySpaceFaster())(static_cast <bool> (isSrcMemorySpaceFaster() || isDestMemorySpaceFaster
()) ? void (0) : __assert_fail ("isSrcMemorySpaceFaster() || isDestMemorySpaceFaster()"
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 1196, __extension__ __PRETTY_FUNCTION__))
;
1197 return isSrcMemorySpaceFaster() ? 0 : getSrcMemRefRank() + 1;
1198 }
1199
  // True iff the op carries the two extra trailing operands
  // (stride, elementsPerStride) beyond the base operand count.
1200 bool isStrided() {
1201 return getNumOperands() != 1 + getSrcMemRefRank() + 1 +
19
Assuming the condition is false
20
Returning zero, which participates in a condition later
1202 getDstMemRefRank() + 1 + 1 +
1203 getTagMemRefRank();
1204 }
1205
  // NOTE(review): returns a *null* (default-constructed) Value when the op
  // is not strided. Callers MUST check the result (or isStrided()) before
  // invoking any method on it — the static analyzer's reported null
  // dereference (Value.h:117, "Called C++ object pointer is null") arises
  // from a caller in MemRefOps.cpp using this result unchecked.
1206 Value getStride() {
1207 if (!isStrided())
18
Calling 'DmaStartOp::isStrided'
21
Returning from 'DmaStartOp::isStrided'
22
Taking true branch
1208 return nullptr;
23
Passing null pointer value via 1st parameter 'impl'
24
Calling default constructor for 'Value'
26
Returning from default constructor for 'Value'
1209 return getOperand(getNumOperands() - 1 - 1);
1210 }
1211
  // NOTE(review): same contract as getStride() — null Value when the op is
  // not strided; callers must check before use.
1212 Value getNumElementsPerStride() {
1213 if (!isStrided())
1214 return nullptr;
1215 return getOperand(getNumOperands() - 1);
1216 }
1217};
1218} // namespace memref
1219} // namespace mlir
1220DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::DmaStartOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::DmaStartOp>(); } }
1221
1222namespace mlir {
1223namespace memref {
1224
1225//===----------------------------------------------------------------------===//
1226// ::mlir::memref::DmaWaitOp declarations
1227//===----------------------------------------------------------------------===//
1228
// ODS-generated adaptor for DmaWaitOp: named operand access
// (tagMemRef / tagIndices / numElements) over raw operand lists.
1229class DmaWaitOpAdaptor {
1230public:
  // Wrap raw operands/attributes/regions (e.g. during folding).
1231 DmaWaitOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
1232
  // Wrap an existing DmaWaitOp instance.
1233 DmaWaitOpAdaptor(DmaWaitOp &op);
1234
1235 ::mlir::ValueRange getOperands();
1236 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1237 ::mlir::ValueRange getODSOperands(unsigned index);
1238 ::mlir::Value tagMemRef();
1239 ::mlir::ValueRange tagIndices();
1240 ::mlir::Value numElements();
1241 ::mlir::DictionaryAttr getAttributes();
1242 ::mlir::LogicalResult verify(::mlir::Location loc);
1243private:
1244 ::mlir::ValueRange odsOperands;
1245 ::mlir::DictionaryAttr odsAttrs;
1246 ::mlir::RegionRange odsRegions;
1247};
// ODS-generated declaration for the `memref.dma_wait` op (zero results,
// at least 2 operands): blocks until the DMA identified by the tag
// memref/indices completes.
1248class DmaWaitOp : public ::mlir::Op<DmaWaitOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<2>::Impl> {
1249public:
1250 using Op::Op;
1251 using Op::print;
1252 using Adaptor = DmaWaitOpAdaptor;
1253public:
1254 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1255 return {};
1256 }
1257
1258 static constexpr ::llvm::StringLiteral getOperationName() {
1259 return ::llvm::StringLiteral("memref.dma_wait");
1260 }
1261
1262 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1263 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1264 ::mlir::Value tagMemRef();
1265 ::mlir::Operation::operand_range tagIndices();
1266 ::mlir::Value numElements();
1267 ::mlir::MutableOperandRange tagMemRefMutable();
1268 ::mlir::MutableOperandRange tagIndicesMutable();
1269 ::mlir::MutableOperandRange numElementsMutable();
1270 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1271 ::mlir::Operation::result_range getODSResults(unsigned index);
1272 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value tagMemRef, ::mlir::ValueRange tagIndices, ::mlir::Value numElements);
1273 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value tagMemRef, ::mlir::ValueRange tagIndices, ::mlir::Value numElements);
1274 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1275 ::mlir::LogicalResult verify();
1276 ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands, ::llvm::SmallVectorImpl<::mlir::OpFoldResult> &results);
1277 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1278 void print(::mlir::OpAsmPrinter &_odsPrinter);
1279public:
1280 /// Returns the Tag MemRef associated with the DMA operation being waited
1281 /// on.
1282 Value getTagMemRef() { return tagMemRef(); }
1283
1284 /// Returns the tag memref index for this DMA operation.
1285 operand_range getTagIndices() { return tagIndices(); }
1286
1287 /// Returns the rank (number of indices) of the tag memref.
  // Casts the tag's type to MemRefType unconditionally — presumably
  // guaranteed by the op's verifier.
1288 unsigned getTagMemRefRank() {
1289 return getTagMemRef().getType().cast<MemRefType>().getRank();
1290 }
1291
1292 /// Returns the number of elements transferred in the associated DMA
1293 /// operation.
1294 Value getNumElements() { return numElements(); }
1295};
1296} // namespace memref
1297} // namespace mlir
1298DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::DmaWaitOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::DmaWaitOp>(); } }
1299
1300namespace mlir {
1301namespace memref {
1302
1303//===----------------------------------------------------------------------===//
1304// ::mlir::memref::ExpandShapeOp declarations
1305//===----------------------------------------------------------------------===//
1306
// ODS-generated adaptor for ExpandShapeOp: access to the single `src`
// operand and the `reassociation` array attribute.
1307class ExpandShapeOpAdaptor {
1308public:
  // Wrap raw operands/attributes/regions (e.g. during folding).
1309 ExpandShapeOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
1310
  // Wrap an existing ExpandShapeOp instance.
1311 ExpandShapeOpAdaptor(ExpandShapeOp &op);
1312
1313 ::mlir::ValueRange getOperands();
1314 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1315 ::mlir::ValueRange getODSOperands(unsigned index);
1316 ::mlir::Value src();
1317 ::mlir::DictionaryAttr getAttributes();
1318 ::mlir::ArrayAttr reassociationAttr();
1319 ::mlir::ArrayAttr reassociation();
1320 ::mlir::LogicalResult verify(::mlir::Location loc);
1321private:
1322 ::mlir::ValueRange odsOperands;
1323 ::mlir::DictionaryAttr odsAttrs;
1324 ::mlir::RegionRange odsRegions;
1325};
// ODS-generated declaration for the `memref.expand_shape` op (one operand,
// one result, view-like): expands `src` into a higher-rank memref as
// described by the `reassociation` attribute.
1326class ExpandShapeOp : public ::mlir::Op<ExpandShapeOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::Type>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::OneOperand, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::ViewLikeOpInterface::Trait> {
1327public:
1328 using Op::Op;
1329 using Op::print;
1330 using Adaptor = ExpandShapeOpAdaptor;
1331public:
1332 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1333 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("reassociation")};
1334 return ::llvm::makeArrayRef(attrNames);
1335 }
1336
1337 ::mlir::StringAttr reassociationAttrName() {
1338 return getAttributeNameForIndex(0);
1339 }
1340
1341 static ::mlir::StringAttr reassociationAttrName(::mlir::OperationName name) {
1342 return getAttributeNameForIndex(name, 0);
1343 }
1344
1345 static constexpr ::llvm::StringLiteral getOperationName() {
1346 return ::llvm::StringLiteral("memref.expand_shape");
1347 }
1348
1349 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1350 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1351 ::mlir::Value src();
1352 ::mlir::MutableOperandRange srcMutable();
1353 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1354 ::mlir::Operation::result_range getODSResults(unsigned index);
1355 ::mlir::Value result();
1356 ::mlir::ArrayAttr reassociationAttr();
1357 ::mlir::ArrayAttr reassociation();
1358 void reassociationAttr(::mlir::ArrayAttr attr);
1359 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value src, ArrayRef<ReassociationIndices> reassociation, ArrayRef<NamedAttribute> attrs = {});
1360 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value src, ArrayRef<ReassociationExprs> reassociation, ArrayRef<NamedAttribute> attrs = {});
1361 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Type resultType, Value src, ArrayRef<ReassociationIndices> reassociation, ArrayRef<NamedAttribute> attrs = {});
1362 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Type resultType, Value src, ArrayRef<ReassociationExprs> reassociation, ArrayRef<NamedAttribute> attrs = {});
1363 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::Value src, ::mlir::ArrayAttr reassociation);
1364 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value src, ::mlir::ArrayAttr reassociation);
1365 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1366 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1367 void print(::mlir::OpAsmPrinter &p);
1368 ::mlir::LogicalResult verify();
1369 static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
1370 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
1371 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
1372private:
  // Map attribute index -> registered attribute name for this op instance.
1373 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
1374 return getAttributeNameForIndex((*this)->getName(), index);
1375 }
1376
1377 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
1378 assert(index < 1 && "invalid attribute index")(static_cast <bool> (index < 1 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 1 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 1378, __extension__ __PRETTY_FUNCTION__))
;
1379 return name.getRegisteredInfo()->getAttributeNames()[index];
1380 }
1381
1382public:
1383 SmallVector<AffineMap, 4> getReassociationMaps();
1384 SmallVector<ReassociationExprs, 4> getReassociationExprs();
  // Decode the `reassociation` ArrayAttr (array of integer arrays) into
  // plain index vectors.
1385 SmallVector<ReassociationIndices, 4> getReassociationIndices() {
1386 SmallVector<ReassociationIndices, 4> reassociationIndices;
1387 for (auto attr : reassociation())
1388 reassociationIndices.push_back(llvm::to_vector<2>(
1389 llvm::map_range(attr.cast<ArrayAttr>(), [&](Attribute indexAttr) {
1390 return indexAttr.cast<IntegerAttr>().getInt();
1391 })));
1392 return reassociationIndices;
1393 };
  // Unconditional MemRefType casts — presumably guaranteed by the verifier.
1394 MemRefType getSrcType() { return src().getType().cast<MemRefType>(); }
1395 MemRefType getResultType() { return result().getType().cast<MemRefType>(); }
  // ViewLikeOpInterface: the viewed source is the single operand.
1396 Value getViewSource() { return src(); }
1397};
1398} // namespace memref
1399} // namespace mlir
1400DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::ExpandShapeOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::ExpandShapeOp>(); } }
1401
1402namespace mlir {
1403namespace memref {
1404
1405//===----------------------------------------------------------------------===//
1406// ::mlir::memref::GetGlobalOp declarations
1407//===----------------------------------------------------------------------===//
1408
// ODS-generated adaptor for GetGlobalOp: zero operands, one `name`
// flat-symbol-ref attribute.
1409class GetGlobalOpAdaptor {
1410public:
  // Wrap raw operands/attributes/regions (e.g. during folding).
1411 GetGlobalOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
1412
  // Wrap an existing GetGlobalOp instance.
1413 GetGlobalOpAdaptor(GetGlobalOp &op);
1414
1415 ::mlir::ValueRange getOperands();
1416 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1417 ::mlir::ValueRange getODSOperands(unsigned index);
1418 ::mlir::DictionaryAttr getAttributes();
1419 ::mlir::FlatSymbolRefAttr nameAttr();
1420 ::llvm::StringRef name();
1421 ::mlir::LogicalResult verify(::mlir::Location loc);
1422private:
1423 ::mlir::ValueRange odsOperands;
1424 ::mlir::DictionaryAttr odsAttrs;
1425 ::mlir::RegionRange odsRegions;
1426};
// ODS-generated declaration for the `memref.get_global` op: no operands,
// one MemRefType result; references a symbol via the `name` attribute and
// verifies the use through SymbolUserOpInterface.
1427class GetGlobalOp : public ::mlir::Op<GetGlobalOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::MemRefType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::ZeroOperands, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::SymbolUserOpInterface::Trait> {
1428public:
1429 using Op::Op;
1430 using Op::print;
1431 using Adaptor = GetGlobalOpAdaptor;
1432public:
1433 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1434 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("name")};
1435 return ::llvm::makeArrayRef(attrNames);
1436 }
1437
1438 ::mlir::StringAttr nameAttrName() {
1439 return getAttributeNameForIndex(0);
1440 }
1441
1442 static ::mlir::StringAttr nameAttrName(::mlir::OperationName name) {
1443 return getAttributeNameForIndex(name, 0);
1444 }
1445
1446 static constexpr ::llvm::StringLiteral getOperationName() {
1447 return ::llvm::StringLiteral("memref.get_global");
1448 }
1449
1450 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1451 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1452 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1453 ::mlir::Operation::result_range getODSResults(unsigned index);
1454 ::mlir::Value result();
1455 ::mlir::FlatSymbolRefAttr nameAttr();
1456 ::llvm::StringRef name();
1457 void nameAttr(::mlir::FlatSymbolRefAttr attr);
1458 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::FlatSymbolRefAttr name);
1459 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::FlatSymbolRefAttr name);
1460 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::llvm::StringRef name);
1461 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::llvm::StringRef name);
1462 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1463 ::mlir::LogicalResult verify();
1464 ::mlir::LogicalResult verifySymbolUses(::mlir::SymbolTableCollection &symbolTable);
1465 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1466 void print(::mlir::OpAsmPrinter &_odsPrinter);
1467 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
1468private:
  // Map attribute index -> registered attribute name for this op instance.
1469 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
1470 return getAttributeNameForIndex((*this)->getName(), index);
1471 }
1472
1473 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
1474 assert(index < 1 && "invalid attribute index")(static_cast <bool> (index < 1 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 1 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 1474, __extension__ __PRETTY_FUNCTION__))
;
1475 return name.getRegisteredInfo()->getAttributeNames()[index];
1476 }
1477
1478public:
1479};
1480} // namespace memref
1481} // namespace mlir
1482DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::GetGlobalOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::GetGlobalOp>(); } }
1483
1484namespace mlir {
1485namespace memref {
1486
1487//===----------------------------------------------------------------------===//
1488// ::mlir::memref::GlobalOp declarations
1489//===----------------------------------------------------------------------===//
1490
// ODS-generated adaptor for GlobalOp: zero operands; exposes the op's six
// attributes (sym_name, sym_visibility, type, initial_value, constant,
// alignment) over a raw DictionaryAttr.
1491class GlobalOpAdaptor {
1492public:
  // Wrap raw operands/attributes/regions (e.g. during folding).
1493 GlobalOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
1494
  // Wrap an existing GlobalOp instance.
1495 GlobalOpAdaptor(GlobalOp &op);
1496
1497 ::mlir::ValueRange getOperands();
1498 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1499 ::mlir::ValueRange getODSOperands(unsigned index);
1500 ::mlir::DictionaryAttr getAttributes();
1501 ::mlir::StringAttr sym_nameAttr();
1502 ::llvm::StringRef sym_name();
1503 ::mlir::StringAttr sym_visibilityAttr();
1504 ::llvm::Optional< ::llvm::StringRef > sym_visibility();
1505 ::mlir::TypeAttr typeAttr();
1506 ::mlir::MemRefType type();
1507 ::mlir::Attribute initial_valueAttr();
1508 ::llvm::Optional<::mlir::Attribute> initial_value();
1509 ::mlir::UnitAttr constantAttr();
1510 bool constant();
1511 ::mlir::IntegerAttr alignmentAttr();
1512 ::llvm::Optional<uint64_t> alignment();
1513 ::mlir::LogicalResult verify(::mlir::Location loc);
1514private:
1515 ::mlir::ValueRange odsOperands;
1516 ::mlir::DictionaryAttr odsAttrs;
1517 ::mlir::RegionRange odsRegions;
1518};
// ODS-generated declaration for the `memref.global` op: a symbol-defining
// op (no operands/results) describing a global memref with optional
// visibility, initial value, constness, and alignment attributes.
1519class GlobalOp : public ::mlir::Op<GlobalOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::ZeroOperands, ::mlir::SymbolOpInterface::Trait> {
1520public:
1521 using Op::Op;
1522 using Op::print;
1523 using Adaptor = GlobalOpAdaptor;
1524public:
  // Attribute name table; indices below (0..5) follow this order.
1525 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1526 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("sym_name"), ::llvm::StringRef("sym_visibility"), ::llvm::StringRef("type"), ::llvm::StringRef("initial_value"), ::llvm::StringRef("constant"), ::llvm::StringRef("alignment")};
1527 return ::llvm::makeArrayRef(attrNames);
1528 }
1529
1530 ::mlir::StringAttr sym_nameAttrName() {
1531 return getAttributeNameForIndex(0);
1532 }
1533
1534 static ::mlir::StringAttr sym_nameAttrName(::mlir::OperationName name) {
1535 return getAttributeNameForIndex(name, 0);
1536 }
1537
1538 ::mlir::StringAttr sym_visibilityAttrName() {
1539 return getAttributeNameForIndex(1);
1540 }
1541
1542 static ::mlir::StringAttr sym_visibilityAttrName(::mlir::OperationName name) {
1543 return getAttributeNameForIndex(name, 1);
1544 }
1545
1546 ::mlir::StringAttr typeAttrName() {
1547 return getAttributeNameForIndex(2);
1548 }
1549
1550 static ::mlir::StringAttr typeAttrName(::mlir::OperationName name) {
1551 return getAttributeNameForIndex(name, 2);
1552 }
1553
1554 ::mlir::StringAttr initial_valueAttrName() {
1555 return getAttributeNameForIndex(3);
1556 }
1557
1558 static ::mlir::StringAttr initial_valueAttrName(::mlir::OperationName name) {
1559 return getAttributeNameForIndex(name, 3);
1560 }
1561
1562 ::mlir::StringAttr constantAttrName() {
1563 return getAttributeNameForIndex(4);
1564 }
1565
1566 static ::mlir::StringAttr constantAttrName(::mlir::OperationName name) {
1567 return getAttributeNameForIndex(name, 4);
1568 }
1569
1570 ::mlir::StringAttr alignmentAttrName() {
1571 return getAttributeNameForIndex(5);
1572 }
1573
1574 static ::mlir::StringAttr alignmentAttrName(::mlir::OperationName name) {
1575 return getAttributeNameForIndex(name, 5);
1576 }
1577
1578 static constexpr ::llvm::StringLiteral getOperationName() {
1579 return ::llvm::StringLiteral("memref.global");
1580 }
1581
1582 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1583 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1584 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1585 ::mlir::Operation::result_range getODSResults(unsigned index);
1586 ::mlir::StringAttr sym_nameAttr();
1587 ::llvm::StringRef sym_name();
1588 ::mlir::StringAttr sym_visibilityAttr();
1589 ::llvm::Optional< ::llvm::StringRef > sym_visibility();
1590 ::mlir::TypeAttr typeAttr();
1591 ::mlir::MemRefType type();
1592 ::mlir::Attribute initial_valueAttr();
1593 ::llvm::Optional<::mlir::Attribute> initial_value();
1594 ::mlir::UnitAttr constantAttr();
1595 bool constant();
1596 ::mlir::IntegerAttr alignmentAttr();
1597 ::llvm::Optional<uint64_t> alignment();
1598 void sym_nameAttr(::mlir::StringAttr attr);
1599 void sym_visibilityAttr(::mlir::StringAttr attr);
1600 void typeAttr(::mlir::TypeAttr attr);
1601 void initial_valueAttr(::mlir::Attribute attr);
1602 void constantAttr(::mlir::UnitAttr attr);
1603 void alignmentAttr(::mlir::IntegerAttr attr);
1604 ::mlir::Attribute removeSym_visibilityAttr();
1605 ::mlir::Attribute removeInitial_valueAttr();
1606 ::mlir::Attribute removeConstantAttr();
1607 ::mlir::Attribute removeAlignmentAttr();
1608 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::StringAttr sym_name, /*optional*/::mlir::StringAttr sym_visibility, ::mlir::TypeAttr type, /*optional*/::mlir::Attribute initial_value, /*optional*/::mlir::UnitAttr constant, /*optional*/::mlir::IntegerAttr alignment);
1609 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::StringAttr sym_name, /*optional*/::mlir::StringAttr sym_visibility, ::mlir::TypeAttr type, /*optional*/::mlir::Attribute initial_value, /*optional*/::mlir::UnitAttr constant, /*optional*/::mlir::IntegerAttr alignment);
1610 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::llvm::StringRef sym_name, /*optional*/::mlir::StringAttr sym_visibility, ::mlir::MemRefType type, /*optional*/::mlir::Attribute initial_value, /*optional*/bool constant, /*optional*/::mlir::IntegerAttr alignment);
1611 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::llvm::StringRef sym_name, /*optional*/::mlir::StringAttr sym_visibility, ::mlir::MemRefType type, /*optional*/::mlir::Attribute initial_value, /*optional*/bool constant, /*optional*/::mlir::IntegerAttr alignment);
1612 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1613 ::mlir::LogicalResult verify();
1614 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1615 void print(::mlir::OpAsmPrinter &_odsPrinter);
1616private:
  // Map attribute index -> registered attribute name for this op instance.
1617 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
1618 return getAttributeNameForIndex((*this)->getName(), index);
1619 }
1620
1621 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
1622 assert(index < 6 && "invalid attribute index")(static_cast <bool> (index < 6 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 6 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 1622, __extension__ __PRETTY_FUNCTION__))
;
1623 return name.getRegisteredInfo()->getAttributeNames()[index];
1624 }
1625
1626public:
  // External global: declared with no initial value.
1627 bool isExternal() { return !initial_value(); }
  // Uninitialized global: has an initial_value attribute, but it is the
  // UnitAttr marker rather than actual data. The !isExternal() guard makes
  // the Optional access safe.
1628 bool isUninitialized() {
1629 return !isExternal() && initial_value().getValue().isa<UnitAttr>();
1630 }
1631};
1632} // namespace memref
1633} // namespace mlir
1634DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::GlobalOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::GlobalOp>(); } }
1635
1636namespace mlir {
1637namespace memref {
1638
1639//===----------------------------------------------------------------------===//
1640// ::mlir::memref::PrefetchOp declarations
1641//===----------------------------------------------------------------------===//
1642
// ODS-generated adaptor for PrefetchOp: memref + indices operands and the
// isWrite / localityHint / isDataCache attributes.
1643class PrefetchOpAdaptor {
1644public:
  // Wrap raw operands/attributes/regions (e.g. during folding).
1645 PrefetchOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
1646
  // Wrap an existing PrefetchOp instance.
1647 PrefetchOpAdaptor(PrefetchOp &op);
1648
1649 ::mlir::ValueRange getOperands();
1650 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1651 ::mlir::ValueRange getODSOperands(unsigned index);
1652 ::mlir::Value memref();
1653 ::mlir::ValueRange indices();
1654 ::mlir::DictionaryAttr getAttributes();
1655 ::mlir::BoolAttr isWriteAttr();
1656 bool isWrite();
1657 ::mlir::IntegerAttr localityHintAttr();
1658 uint32_t localityHint();
1659 ::mlir::BoolAttr isDataCacheAttr();
1660 bool isDataCache();
1661 ::mlir::LogicalResult verify(::mlir::Location loc);
1662private:
1663 ::mlir::ValueRange odsOperands;
1664 ::mlir::DictionaryAttr odsAttrs;
1665 ::mlir::RegionRange odsRegions;
1666};
1667class PrefetchOp : public ::mlir::Op<PrefetchOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<1>::Impl> {
1668public:
1669 using Op::Op;
1670 using Op::print;
1671 using Adaptor = PrefetchOpAdaptor;
1672public:
1673 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1674 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("isWrite"), ::llvm::StringRef("localityHint"), ::llvm::StringRef("isDataCache")};
1675 return ::llvm::makeArrayRef(attrNames);
1676 }
1677
1678 ::mlir::StringAttr isWriteAttrName() {
1679 return getAttributeNameForIndex(0);
1680 }
1681
1682 static ::mlir::StringAttr isWriteAttrName(::mlir::OperationName name) {
1683 return getAttributeNameForIndex(name, 0);
1684 }
1685
1686 ::mlir::StringAttr localityHintAttrName() {
1687 return getAttributeNameForIndex(1);
1688 }
1689
1690 static ::mlir::StringAttr localityHintAttrName(::mlir::OperationName name) {
1691 return getAttributeNameForIndex(name, 1);
1692 }
1693
1694 ::mlir::StringAttr isDataCacheAttrName() {
1695 return getAttributeNameForIndex(2);
1696 }
1697
1698 static ::mlir::StringAttr isDataCacheAttrName(::mlir::OperationName name) {
1699 return getAttributeNameForIndex(name, 2);
1700 }
1701
1702 static constexpr ::llvm::StringLiteral getOperationName() {
1703 return ::llvm::StringLiteral("memref.prefetch");
1704 }
1705
1706 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1707 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1708 ::mlir::Value memref();
1709 ::mlir::Operation::operand_range indices();
1710 ::mlir::MutableOperandRange memrefMutable();
1711 ::mlir::MutableOperandRange indicesMutable();
1712 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1713 ::mlir::Operation::result_range getODSResults(unsigned index);
1714 ::mlir::BoolAttr isWriteAttr();
1715 bool isWrite();
1716 ::mlir::IntegerAttr localityHintAttr();
1717 uint32_t localityHint();
1718 ::mlir::BoolAttr isDataCacheAttr();
1719 bool isDataCache();
1720 void isWriteAttr(::mlir::BoolAttr attr);
1721 void localityHintAttr(::mlir::IntegerAttr attr);
1722 void isDataCacheAttr(::mlir::BoolAttr attr);
1723 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value memref, ::mlir::ValueRange indices, ::mlir::BoolAttr isWrite, ::mlir::IntegerAttr localityHint, ::mlir::BoolAttr isDataCache);
1724 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value memref, ::mlir::ValueRange indices, ::mlir::BoolAttr isWrite, ::mlir::IntegerAttr localityHint, ::mlir::BoolAttr isDataCache);
1725 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value memref, ::mlir::ValueRange indices, bool isWrite, uint32_t localityHint, bool isDataCache);
1726 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value memref, ::mlir::ValueRange indices, bool isWrite, uint32_t localityHint, bool isDataCache);
1727 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1728 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1729 void print(::mlir::OpAsmPrinter &p);
1730 ::mlir::LogicalResult verify();
1731 ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands, ::llvm::SmallVectorImpl<::mlir::OpFoldResult> &results);
1732private:
1733 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
1734 return getAttributeNameForIndex((*this)->getName(), index);
1735 }
1736
1737 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
1738 assert(index < 3 && "invalid attribute index")(static_cast <bool> (index < 3 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 3 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 1738, __extension__ __PRETTY_FUNCTION__))
;
1739 return name.getRegisteredInfo()->getAttributeNames()[index];
1740 }
1741
1742public:
1743 MemRefType getMemRefType() {
1744 return memref().getType().cast<MemRefType>();
1745 }
1746 static StringRef getLocalityHintAttrName() { return "localityHint"; }
1747 static StringRef getIsWriteAttrName() { return "isWrite"; }
1748 static StringRef getIsDataCacheAttrName() { return "isDataCache"; }
1749};
1750} // namespace memref
1751} // namespace mlir
1752DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::PrefetchOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::PrefetchOp>(); } }
1753
1754namespace mlir {
1755namespace memref {
1756
1757//===----------------------------------------------------------------------===//
1758// ::mlir::memref::RankOp declarations
1759//===----------------------------------------------------------------------===//
1760
/// Adaptor for the `memref.rank` operation: exposes operand/attribute
/// accessors over a raw ValueRange + DictionaryAttr, without requiring a
/// materialized Operation (used e.g. during building and verification).
class RankOpAdaptor {
public:
  RankOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});

  /// Construct from an existing RankOp, borrowing its operands/attributes.
  RankOpAdaptor(RankOp &op);

  ::mlir::ValueRange getOperands();
  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::ValueRange getODSOperands(unsigned index);
  // Named accessor for the single memref operand.
  ::mlir::Value memref();
  ::mlir::DictionaryAttr getAttributes();
  // Verify attribute well-formedness, reporting errors at `loc`.
  ::mlir::LogicalResult verify(::mlir::Location loc);
private:
  ::mlir::ValueRange odsOperands;
  ::mlir::DictionaryAttr odsAttrs;
  ::mlir::RegionRange odsRegions;
};
/// Op class for `memref.rank`: one memref operand, one ::mlir::IndexType
/// result, no attributes, and a memory-effect interface.
class RankOp : public ::mlir::Op<RankOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::IndexType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::OneOperand, ::mlir::MemoryEffectOpInterface::Trait> {
public:
  using Op::Op;
  using Op::print;
  using Adaptor = RankOpAdaptor;
public:
  // This op carries no attributes.
  static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
    return {};
  }

  static constexpr ::llvm::StringLiteral getOperationName() {
    return ::llvm::StringLiteral("memref.rank");
  }

  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::Operation::operand_range getODSOperands(unsigned index);
  // Named accessor (and mutable counterpart) for the memref operand.
  ::mlir::Value memref();
  ::mlir::MutableOperandRange memrefMutable();
  std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
  ::mlir::Operation::result_range getODSResults(unsigned index);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type resultType0, ::mlir::Value memref);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value memref);
  static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
  ::mlir::LogicalResult verify();
  // Single-result fold hook: may return a constant-rank attribute or value.
  ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
  static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
  void print(::mlir::OpAsmPrinter &_odsPrinter);
  void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
public:
};
1808} // namespace memref
1809} // namespace mlir
1810DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::RankOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::RankOp>(); } }
1811
1812namespace mlir {
1813namespace memref {
1814
1815//===----------------------------------------------------------------------===//
1816// ::mlir::memref::ReinterpretCastOp declarations
1817//===----------------------------------------------------------------------===//
1818
/// Adaptor for `memref.reinterpret_cast`: accessors for the source memref,
/// the dynamic offset/size/stride operand groups, and the static_* array
/// attributes, over a raw ValueRange + DictionaryAttr.
class ReinterpretCastOpAdaptor {
public:
  ReinterpretCastOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});

  /// Construct from an existing ReinterpretCastOp.
  ReinterpretCastOpAdaptor(ReinterpretCastOp &op);

  ::mlir::ValueRange getOperands();
  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::ValueRange getODSOperands(unsigned index);
  // Operand groups: the source memref plus variadic dynamic offsets,
  // sizes and strides.
  ::mlir::Value source();
  ::mlir::ValueRange offsets();
  ::mlir::ValueRange sizes();
  ::mlir::ValueRange strides();
  ::mlir::DictionaryAttr getAttributes();
  // Static (attribute-encoded) counterparts of the dynamic operand groups.
  ::mlir::ArrayAttr static_offsetsAttr();
  ::mlir::ArrayAttr static_offsets();
  ::mlir::ArrayAttr static_sizesAttr();
  ::mlir::ArrayAttr static_sizes();
  ::mlir::ArrayAttr static_stridesAttr();
  ::mlir::ArrayAttr static_strides();
  // Verify attribute well-formedness, reporting errors at `loc`.
  ::mlir::LogicalResult verify(::mlir::Location loc);
private:
  ::mlir::ValueRange odsOperands;
  ::mlir::DictionaryAttr odsAttrs;
  ::mlir::RegionRange odsRegions;
};
1845class ReinterpretCastOp : public ::mlir::Op<ReinterpretCastOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::MemRefType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<1>::Impl, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::OpTrait::AttrSizedOperandSegments, ::mlir::ViewLikeOpInterface::Trait, ::mlir::OffsetSizeAndStrideOpInterface::Trait, ::mlir::OpTrait::MemRefsNormalizable> {
1846public:
1847 using Op::Op;
1848 using Op::print;
1849 using Adaptor = ReinterpretCastOpAdaptor;
1850public:
1851 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
1852 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("static_offsets"), ::llvm::StringRef("static_sizes"), ::llvm::StringRef("static_strides"), ::llvm::StringRef("operand_segment_sizes")};
1853 return ::llvm::makeArrayRef(attrNames);
1854 }
1855
1856 ::mlir::StringAttr static_offsetsAttrName() {
1857 return getAttributeNameForIndex(0);
1858 }
1859
1860 static ::mlir::StringAttr static_offsetsAttrName(::mlir::OperationName name) {
1861 return getAttributeNameForIndex(name, 0);
1862 }
1863
1864 ::mlir::StringAttr static_sizesAttrName() {
1865 return getAttributeNameForIndex(1);
1866 }
1867
1868 static ::mlir::StringAttr static_sizesAttrName(::mlir::OperationName name) {
1869 return getAttributeNameForIndex(name, 1);
1870 }
1871
1872 ::mlir::StringAttr static_stridesAttrName() {
1873 return getAttributeNameForIndex(2);
1874 }
1875
1876 static ::mlir::StringAttr static_stridesAttrName(::mlir::OperationName name) {
1877 return getAttributeNameForIndex(name, 2);
1878 }
1879
1880 ::mlir::StringAttr operand_segment_sizesAttrName() {
1881 return getAttributeNameForIndex(3);
1882 }
1883
1884 static ::mlir::StringAttr operand_segment_sizesAttrName(::mlir::OperationName name) {
1885 return getAttributeNameForIndex(name, 3);
1886 }
1887
1888 static constexpr ::llvm::StringLiteral getOperationName() {
1889 return ::llvm::StringLiteral("memref.reinterpret_cast");
1890 }
1891
1892 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
1893 ::mlir::Operation::operand_range getODSOperands(unsigned index);
1894 ::mlir::Value source();
1895 ::mlir::Operation::operand_range offsets();
1896 ::mlir::Operation::operand_range sizes();
1897 ::mlir::Operation::operand_range strides();
1898 ::mlir::MutableOperandRange sourceMutable();
1899 ::mlir::MutableOperandRange offsetsMutable();
1900 ::mlir::MutableOperandRange sizesMutable();
1901 ::mlir::MutableOperandRange stridesMutable();
1902 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
1903 ::mlir::Operation::result_range getODSResults(unsigned index);
1904 ::mlir::Value result();
1905 ::mlir::ArrayAttr static_offsetsAttr();
1906 ::mlir::ArrayAttr static_offsets();
1907 ::mlir::ArrayAttr static_sizesAttr();
1908 ::mlir::ArrayAttr static_sizes();
1909 ::mlir::ArrayAttr static_stridesAttr();
1910 ::mlir::ArrayAttr static_strides();
1911 void static_offsetsAttr(::mlir::ArrayAttr attr);
1912 void static_sizesAttr(::mlir::ArrayAttr attr);
1913 void static_stridesAttr(::mlir::ArrayAttr attr);
1914 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType resultType, Value source, OpFoldResult offset, ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides, ArrayRef<NamedAttribute> attrs = {});
1915 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType resultType, Value source, int64_t offset, ArrayRef<int64_t> sizes, ArrayRef<int64_t> strides, ArrayRef<NamedAttribute> attrs = {});
1916 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType resultType, Value source, Value offset, ValueRange sizes, ValueRange strides, ArrayRef<NamedAttribute> attrs = {});
1917 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::Value source, ::mlir::ValueRange offsets, ::mlir::ValueRange sizes, ::mlir::ValueRange strides, ::mlir::ArrayAttr static_offsets, ::mlir::ArrayAttr static_sizes, ::mlir::ArrayAttr static_strides);
1918 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value source, ::mlir::ValueRange offsets, ::mlir::ValueRange sizes, ::mlir::ValueRange strides, ::mlir::ArrayAttr static_offsets, ::mlir::ArrayAttr static_sizes, ::mlir::ArrayAttr static_strides);
1919 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
1920 ::mlir::LogicalResult verify();
1921 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
1922 void print(::mlir::OpAsmPrinter &_odsPrinter);
1923 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
1924private:
1925 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
1926 return getAttributeNameForIndex((*this)->getName(), index);
1927 }
1928
1929 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
1930 assert(index < 4 && "invalid attribute index")(static_cast <bool> (index < 4 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 4 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 1930, __extension__ __PRETTY_FUNCTION__))
;
1931 return name.getRegisteredInfo()->getAttributeNames()[index];
1932 }
1933
1934public:
1935 /// Returns the dynamic sizes for this subview operation if specified.
1936 ::mlir::Operation::operand_range getDynamicSizes() { return sizes(); }
1937
1938 /// Return the list of Range (i.e. offset, size, stride). Each
1939 /// Range entry contains either the dynamic value or a ConstantIndexOp
1940 /// constructed with `b` at location `loc`.
1941 ::mlir::SmallVector<::mlir::Range, 8> getOrCreateRanges(
1942 ::mlir::OpBuilder &b, ::mlir::Location loc) {
1943 return ::mlir::getOrCreateRanges(*this, b, loc);
1944 }
1945
1946 // The result of the op is always a ranked memref.
1947 MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
1948 Value getViewSource() { return source(); }
1949
1950 /// Return the rank of the source ShapedType.
1951 unsigned getResultRank() {
1952 return getResult().getType().cast<ShapedType>().getRank();
1953 }
1954
1955 /// Return the expected rank of each of the`static_offsets`, `static_sizes`
1956 /// and `static_strides` attributes.
1957 std::array<unsigned, 3> getArrayAttrMaxRanks() {
1958 unsigned resultRank = getResult().getType().cast<ShapedType>().getRank();
1959 return {1, resultRank, resultRank};
1960 }
1961
1962 /// Return the number of leading operands before the `offsets`, `sizes` and
1963 /// and `strides` operands.
1964 static unsigned getOffsetSizeAndStrideStartOperandIndex() { return 1; }
1965};
1966} // namespace memref
1967} // namespace mlir
1968DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::ReinterpretCastOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::ReinterpretCastOp>(); } }
1969
1970namespace mlir {
1971namespace memref {
1972
1973//===----------------------------------------------------------------------===//
1974// ::mlir::memref::ReshapeOp declarations
1975//===----------------------------------------------------------------------===//
1976
/// Adaptor for `memref.reshape`: accessors for the source memref and the
/// shape operand, over a raw ValueRange + DictionaryAttr.
class ReshapeOpAdaptor {
public:
  ReshapeOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});

  /// Construct from an existing ReshapeOp.
  ReshapeOpAdaptor(ReshapeOp &op);

  ::mlir::ValueRange getOperands();
  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::ValueRange getODSOperands(unsigned index);
  // Named accessors for the two operands: the memref being reshaped and the
  // value holding the target shape.
  ::mlir::Value source();
  ::mlir::Value shape();
  ::mlir::DictionaryAttr getAttributes();
  // Verify attribute well-formedness, reporting errors at `loc`.
  ::mlir::LogicalResult verify(::mlir::Location loc);
private:
  ::mlir::ValueRange odsOperands;
  ::mlir::DictionaryAttr odsAttrs;
  ::mlir::RegionRange odsRegions;
};
/// Op class for `memref.reshape`: exactly two operands (source, shape), one
/// result, no attributes. Implements ViewLikeOpInterface and
/// MemoryEffectOpInterface.
class ReshapeOp : public ::mlir::Op<ReshapeOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::Type>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::NOperands<2>::Impl, ::mlir::ViewLikeOpInterface::Trait, ::mlir::MemoryEffectOpInterface::Trait> {
public:
  using Op::Op;
  using Op::print;
  using Adaptor = ReshapeOpAdaptor;
public:
  // This op carries no attributes.
  static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
    return {};
  }

  static constexpr ::llvm::StringLiteral getOperationName() {
    return ::llvm::StringLiteral("memref.reshape");
  }

  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::Operation::operand_range getODSOperands(unsigned index);
  // Named accessors (and mutable counterparts) for the two operands.
  ::mlir::Value source();
  ::mlir::Value shape();
  ::mlir::MutableOperandRange sourceMutable();
  ::mlir::MutableOperandRange shapeMutable();
  std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
  ::mlir::Operation::result_range getODSResults(unsigned index);
  ::mlir::Value result();
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType resultType, Value operand, Value shape);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::Value source, ::mlir::Value shape);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value source, ::mlir::Value shape);
  static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
  ::mlir::LogicalResult verify();
  static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
  void print(::mlir::OpAsmPrinter &_odsPrinter);
  void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
public:
  // The result type is constrained to MemRefType by the op definition.
  MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
  // ViewLikeOpInterface: the viewed source is the reshaped memref.
  Value getViewSource() { return source(); }
};
2030} // namespace memref
2031} // namespace mlir
2032DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::ReshapeOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::ReshapeOp>(); } }
2033
2034namespace mlir {
2035namespace memref {
2036
2037//===----------------------------------------------------------------------===//
2038// ::mlir::memref::StoreOp declarations
2039//===----------------------------------------------------------------------===//
2040
/// Adaptor for `memref.store`: accessors for the stored value, the target
/// memref, and the variadic indices, over a raw ValueRange + DictionaryAttr.
class StoreOpAdaptor {
public:
  StoreOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});

  /// Construct from an existing StoreOp.
  StoreOpAdaptor(StoreOp &op);

  ::mlir::ValueRange getOperands();
  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::ValueRange getODSOperands(unsigned index);
  // Operand groups: value to store, destination memref, then indices.
  ::mlir::Value value();
  ::mlir::Value memref();
  ::mlir::ValueRange indices();
  ::mlir::DictionaryAttr getAttributes();
  // Verify attribute well-formedness, reporting errors at `loc`.
  ::mlir::LogicalResult verify(::mlir::Location loc);
private:
  ::mlir::ValueRange odsOperands;
  ::mlir::DictionaryAttr odsAttrs;
  ::mlir::RegionRange odsRegions;
};
/// Op class for `memref.store`: zero results, at least two operands
/// (value, memref, then indices), no attributes.
class StoreOp : public ::mlir::Op<StoreOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<2>::Impl, ::mlir::OpTrait::MemRefsNormalizable, ::mlir::MemoryEffectOpInterface::Trait> {
public:
  using Op::Op;
  using Op::print;
  using Adaptor = StoreOpAdaptor;
public:
  // This op carries no attributes.
  static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
    return {};
  }

  static constexpr ::llvm::StringLiteral getOperationName() {
    return ::llvm::StringLiteral("memref.store");
  }

  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::Operation::operand_range getODSOperands(unsigned index);
  // Named accessors (and mutable counterparts) for the operand groups.
  ::mlir::Value value();
  ::mlir::Value memref();
  ::mlir::Operation::operand_range indices();
  ::mlir::MutableOperandRange valueMutable();
  ::mlir::MutableOperandRange memrefMutable();
  ::mlir::MutableOperandRange indicesMutable();
  std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
  ::mlir::Operation::result_range getODSResults(unsigned index);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value valueToStore, Value memref);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value value, ::mlir::Value memref, ::mlir::ValueRange indices);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value value, ::mlir::Value memref, ::mlir::ValueRange indices);
  static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
  ::mlir::LogicalResult verify();
  ::mlir::LogicalResult fold(::llvm::ArrayRef<::mlir::Attribute> operands, ::llvm::SmallVectorImpl<::mlir::OpFoldResult> &results);
  static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
  void print(::mlir::OpAsmPrinter &_odsPrinter);
  void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
public:
  // Convenience accessors by fixed operand position:
  // operand 0 = value, operand 1 = memref, operands 2.. = indices.
  Value getValueToStore() { return getOperand(0); }

  Value getMemRef() { return getOperand(1); }
  void setMemRef(Value value) { setOperand(1, value); }
  MemRefType getMemRefType() {
    return getMemRef().getType().cast<MemRefType>();
  }

  operand_range getIndices() {
    return {operand_begin() + 2, operand_end()};
  }
};
2106} // namespace memref
2107} // namespace mlir
2108DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::StoreOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::StoreOp>(); } }
2109
2110namespace mlir {
2111namespace memref {
2112
2113//===----------------------------------------------------------------------===//
2114// ::mlir::memref::TransposeOp declarations
2115//===----------------------------------------------------------------------===//
2116
/// Adaptor for `memref.transpose`: accessors for the input memref and the
/// `permutation` affine-map attribute, over a raw ValueRange + DictionaryAttr.
class TransposeOpAdaptor {
public:
  TransposeOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});

  /// Construct from an existing TransposeOp.
  TransposeOpAdaptor(TransposeOp &op);

  ::mlir::ValueRange getOperands();
  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::ValueRange getODSOperands(unsigned index);
  // Named accessor for the single input operand.
  ::mlir::Value in();
  ::mlir::DictionaryAttr getAttributes();
  // The permutation attribute, as attribute and as unwrapped AffineMap.
  ::mlir::AffineMapAttr permutationAttr();
  ::mlir::AffineMap permutation();
  // Verify attribute well-formedness, reporting errors at `loc`.
  ::mlir::LogicalResult verify(::mlir::Location loc);
private:
  ::mlir::ValueRange odsOperands;
  ::mlir::DictionaryAttr odsAttrs;
  ::mlir::RegionRange odsRegions;
};
2136class TransposeOp : public ::mlir::Op<TransposeOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::Type>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::OneOperand, ::mlir::MemoryEffectOpInterface::Trait> {
2137public:
2138 using Op::Op;
2139 using Op::print;
2140 using Adaptor = TransposeOpAdaptor;
2141public:
2142 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
2143 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("permutation")};
2144 return ::llvm::makeArrayRef(attrNames);
2145 }
2146
2147 ::mlir::StringAttr permutationAttrName() {
2148 return getAttributeNameForIndex(0);
2149 }
2150
2151 static ::mlir::StringAttr permutationAttrName(::mlir::OperationName name) {
2152 return getAttributeNameForIndex(name, 0);
2153 }
2154
2155 static constexpr ::llvm::StringLiteral getOperationName() {
2156 return ::llvm::StringLiteral("memref.transpose");
2157 }
2158
2159 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
2160 ::mlir::Operation::operand_range getODSOperands(unsigned index);
2161 ::mlir::Value in();
2162 ::mlir::MutableOperandRange inMutable();
2163 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
2164 ::mlir::Operation::result_range getODSResults(unsigned index);
2165 ::mlir::AffineMapAttr permutationAttr();
2166 ::mlir::AffineMap permutation();
2167 void permutationAttr(::mlir::AffineMapAttr attr);
2168 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value in, AffineMapAttr permutation, ArrayRef<NamedAttribute> attrs = {});
2169 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type resultType0, ::mlir::Value in, ::mlir::AffineMapAttr permutation);
2170 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value in, ::mlir::AffineMapAttr permutation);
2171 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type resultType0, ::mlir::Value in, ::mlir::AffineMap permutation);
2172 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value in, ::mlir::AffineMap permutation);
2173 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
2174 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
2175 void print(::mlir::OpAsmPrinter &p);
2176 ::mlir::LogicalResult verify();
2177 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
2178 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
2179private:
2180 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
2181 return getAttributeNameForIndex((*this)->getName(), index);
2182 }
2183
2184 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
2185 assert(index < 1 && "invalid attribute index")(static_cast <bool> (index < 1 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 1 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 2185, __extension__ __PRETTY_FUNCTION__))
;
2186 return name.getRegisteredInfo()->getAttributeNames()[index];
2187 }
2188
2189public:
2190 static StringRef getPermutationAttrName() { return "permutation"; }
2191 ShapedType getShapedType() { return in().getType().cast<ShapedType>(); }
2192};
2193} // namespace memref
2194} // namespace mlir
2195DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::TransposeOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::TransposeOp>(); } }
2196
2197namespace mlir {
2198namespace memref {
2199
2200//===----------------------------------------------------------------------===//
2201// ::mlir::memref::ViewOp declarations
2202//===----------------------------------------------------------------------===//
2203
/// Adaptor for `memref.view`: accessors for the source memref, the byte
/// shift, and the variadic dynamic sizes, over a raw ValueRange +
/// DictionaryAttr.
class ViewOpAdaptor {
public:
  ViewOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});

  /// Construct from an existing ViewOp.
  ViewOpAdaptor(ViewOp &op);

  ::mlir::ValueRange getOperands();
  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::ValueRange getODSOperands(unsigned index);
  // Operand groups: source memref, byte shift, then dynamic sizes.
  ::mlir::Value source();
  ::mlir::Value byte_shift();
  ::mlir::ValueRange sizes();
  ::mlir::DictionaryAttr getAttributes();
  // Verify attribute well-formedness, reporting errors at `loc`.
  ::mlir::LogicalResult verify(::mlir::Location loc);
private:
  ::mlir::ValueRange odsOperands;
  ::mlir::DictionaryAttr odsAttrs;
  ::mlir::RegionRange odsRegions;
};
/// Op class for `memref.view`: one MemRefType result, at least two operands
/// (source, byte_shift, then dynamic sizes), no attributes. Implements
/// ViewLikeOpInterface and MemoryEffectOpInterface.
class ViewOp : public ::mlir::Op<ViewOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::MemRefType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<2>::Impl, ::mlir::ViewLikeOpInterface::Trait, ::mlir::MemoryEffectOpInterface::Trait> {
public:
  using Op::Op;
  using Op::print;
  using Adaptor = ViewOpAdaptor;
public:
  // This op carries no attributes.
  static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
    return {};
  }

  static constexpr ::llvm::StringLiteral getOperationName() {
    return ::llvm::StringLiteral("memref.view");
  }

  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::Operation::operand_range getODSOperands(unsigned index);
  // Named accessors (and mutable counterparts) for the operand groups.
  ::mlir::Value source();
  ::mlir::Value byte_shift();
  ::mlir::Operation::operand_range sizes();
  ::mlir::MutableOperandRange sourceMutable();
  ::mlir::MutableOperandRange byte_shiftMutable();
  ::mlir::MutableOperandRange sizesMutable();
  std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
  ::mlir::Operation::result_range getODSResults(unsigned index);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type resultType0, ::mlir::Value source, ::mlir::Value byte_shift, ::mlir::ValueRange sizes);
  static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value source, ::mlir::Value byte_shift, ::mlir::ValueRange sizes);
  static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
  static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
  void print(::mlir::OpAsmPrinter &p);
  ::mlir::LogicalResult verify();
  static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
  // ViewLikeOpInterface hook; defined out of line.
  ::mlir::Value getViewSource();
  void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
public:
  /// The result of a view is always a memref.
  MemRefType getType() { return getResult().getType().cast<MemRefType>(); }

  /// Returns the dynamic sizes for this view operation. This is redundant
  /// with `sizes` but needed in template implementations. More specifically:
  /// ```
  /// template <typename AnyMemRefDefOp>
  /// bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
  ///                              Region *region)
  /// ```
  operand_range getDynamicSizes() {
    return {sizes().begin(), sizes().end()};
  }
};
2271} // namespace memref
2272} // namespace mlir
2273DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::ViewOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::ViewOp>(); } }
2274
2275namespace mlir {
2276namespace memref {
2277
2278//===----------------------------------------------------------------------===//
2279// ::mlir::memref::SubViewOp declarations
2280//===----------------------------------------------------------------------===//
2281
/// Adaptor for `memref.subview`: accessors for the source memref, the dynamic
/// offset/size/stride operand groups, and the static_* array attributes,
/// over a raw ValueRange + DictionaryAttr.
class SubViewOpAdaptor {
public:
  SubViewOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});

  /// Construct from an existing SubViewOp.
  SubViewOpAdaptor(SubViewOp &op);

  ::mlir::ValueRange getOperands();
  std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
  ::mlir::ValueRange getODSOperands(unsigned index);
  // Operand groups: the source memref plus variadic dynamic offsets,
  // sizes and strides.
  ::mlir::Value source();
  ::mlir::ValueRange offsets();
  ::mlir::ValueRange sizes();
  ::mlir::ValueRange strides();
  ::mlir::DictionaryAttr getAttributes();
  // Static (attribute-encoded) counterparts of the dynamic operand groups.
  ::mlir::ArrayAttr static_offsetsAttr();
  ::mlir::ArrayAttr static_offsets();
  ::mlir::ArrayAttr static_sizesAttr();
  ::mlir::ArrayAttr static_sizes();
  ::mlir::ArrayAttr static_stridesAttr();
  ::mlir::ArrayAttr static_strides();
  // Verify attribute well-formedness, reporting errors at `loc`.
  ::mlir::LogicalResult verify(::mlir::Location loc);
private:
  ::mlir::ValueRange odsOperands;
  ::mlir::DictionaryAttr odsAttrs;
  ::mlir::RegionRange odsRegions;
};
2308class SubViewOp : public ::mlir::Op<SubViewOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::OneResult, ::mlir::OpTrait::OneTypedResult<::mlir::MemRefType>::Impl, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::AtLeastNOperands<1>::Impl, ::mlir::ViewLikeOpInterface::Trait, ::mlir::MemoryEffectOpInterface::Trait, ::mlir::OpTrait::AttrSizedOperandSegments, ::mlir::OffsetSizeAndStrideOpInterface::Trait> {
2309public:
2310 using Op::Op;
2311 using Op::print;
2312 using Adaptor = SubViewOpAdaptor;
2313public:
2314 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
2315 static ::llvm::StringRef attrNames[] = {::llvm::StringRef("static_offsets"), ::llvm::StringRef("static_sizes"), ::llvm::StringRef("static_strides"), ::llvm::StringRef("operand_segment_sizes")};
2316 return ::llvm::makeArrayRef(attrNames);
2317 }
2318
2319 ::mlir::StringAttr static_offsetsAttrName() {
2320 return getAttributeNameForIndex(0);
2321 }
2322
2323 static ::mlir::StringAttr static_offsetsAttrName(::mlir::OperationName name) {
2324 return getAttributeNameForIndex(name, 0);
2325 }
2326
2327 ::mlir::StringAttr static_sizesAttrName() {
2328 return getAttributeNameForIndex(1);
2329 }
2330
2331 static ::mlir::StringAttr static_sizesAttrName(::mlir::OperationName name) {
2332 return getAttributeNameForIndex(name, 1);
2333 }
2334
2335 ::mlir::StringAttr static_stridesAttrName() {
2336 return getAttributeNameForIndex(2);
2337 }
2338
2339 static ::mlir::StringAttr static_stridesAttrName(::mlir::OperationName name) {
2340 return getAttributeNameForIndex(name, 2);
2341 }
2342
2343 ::mlir::StringAttr operand_segment_sizesAttrName() {
2344 return getAttributeNameForIndex(3);
2345 }
2346
2347 static ::mlir::StringAttr operand_segment_sizesAttrName(::mlir::OperationName name) {
2348 return getAttributeNameForIndex(name, 3);
2349 }
2350
2351 static constexpr ::llvm::StringLiteral getOperationName() {
2352 return ::llvm::StringLiteral("memref.subview");
2353 }
2354
2355 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
2356 ::mlir::Operation::operand_range getODSOperands(unsigned index);
2357 ::mlir::Value source();
2358 ::mlir::Operation::operand_range offsets();
2359 ::mlir::Operation::operand_range sizes();
2360 ::mlir::Operation::operand_range strides();
2361 ::mlir::MutableOperandRange sourceMutable();
2362 ::mlir::MutableOperandRange offsetsMutable();
2363 ::mlir::MutableOperandRange sizesMutable();
2364 ::mlir::MutableOperandRange stridesMutable();
2365 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
2366 ::mlir::Operation::result_range getODSResults(unsigned index);
2367 ::mlir::Value result();
2368 ::mlir::ArrayAttr static_offsetsAttr();
2369 ::mlir::ArrayAttr static_offsets();
2370 ::mlir::ArrayAttr static_sizesAttr();
2371 ::mlir::ArrayAttr static_sizes();
2372 ::mlir::ArrayAttr static_stridesAttr();
2373 ::mlir::ArrayAttr static_strides();
2374 void static_offsetsAttr(::mlir::ArrayAttr attr);
2375 void static_sizesAttr(::mlir::ArrayAttr attr);
2376 void static_stridesAttr(::mlir::ArrayAttr attr);
2377 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value source, ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides, ArrayRef<NamedAttribute> attrs = {});
2378 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType resultType, Value source, ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides, ArrayRef<NamedAttribute> attrs = {});
2379 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value source, ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes, ArrayRef<int64_t> strides, ArrayRef<NamedAttribute> attrs = {});
2380 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType resultType, Value source, ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes, ArrayRef<int64_t> strides, ArrayRef<NamedAttribute> attrs = {});
2381 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, Value source, ValueRange offsets, ValueRange sizes, ValueRange strides, ArrayRef<NamedAttribute> attrs = {});
2382 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, MemRefType resultType, Value source, ValueRange offsets, ValueRange sizes, ValueRange strides, ArrayRef<NamedAttribute> attrs = {});
2383 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Type result, ::mlir::Value source, ::mlir::ValueRange offsets, ::mlir::ValueRange sizes, ::mlir::ValueRange strides, ::mlir::ArrayAttr static_offsets, ::mlir::ArrayAttr static_sizes, ::mlir::ArrayAttr static_strides);
2384 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value source, ::mlir::ValueRange offsets, ::mlir::ValueRange sizes, ::mlir::ValueRange strides, ::mlir::ArrayAttr static_offsets, ::mlir::ArrayAttr static_sizes, ::mlir::ArrayAttr static_strides);
2385 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
2386 ::mlir::LogicalResult verify();
2387 static void getCanonicalizationPatterns(::mlir::RewritePatternSet &results, ::mlir::MLIRContext *context);
2388 ::mlir::OpFoldResult fold(::llvm::ArrayRef<::mlir::Attribute> operands);
2389 ::mlir::Value getViewSource();
2390 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
2391 void print(::mlir::OpAsmPrinter &_odsPrinter);
2392 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
2393private:
2394 ::mlir::StringAttr getAttributeNameForIndex(unsigned index) {
2395 return getAttributeNameForIndex((*this)->getName(), index);
2396 }
2397
2398 static ::mlir::StringAttr getAttributeNameForIndex(::mlir::OperationName name, unsigned index) {
2399 assert(index < 4 && "invalid attribute index")(static_cast <bool> (index < 4 && "invalid attribute index"
) ? void (0) : __assert_fail ("index < 4 && \"invalid attribute index\""
, "tools/mlir/include/mlir/Dialect/MemRef/IR/MemRefOps.h.inc"
, 2399, __extension__ __PRETTY_FUNCTION__))
;
2400 return name.getRegisteredInfo()->getAttributeNames()[index];
2401 }
2402
2403public:
2404 /// Returns the dynamic sizes for this subview operation if specified.
2405 ::mlir::Operation::operand_range getDynamicSizes() { return sizes(); }
2406
2407 /// Return the list of Range (i.e. offset, size, stride). Each
2408 /// Range entry contains either the dynamic value or a ConstantIndexOp
2409 /// constructed with `b` at location `loc`.
2410 ::mlir::SmallVector<::mlir::Range, 8> getOrCreateRanges(
2411 ::mlir::OpBuilder &b, ::mlir::Location loc) {
2412 return ::mlir::getOrCreateRanges(*this, b, loc);
2413 }
2414
2415 /// Returns the type of the base memref operand.
2416 MemRefType getSourceType() {
2417 return source().getType().cast<MemRefType>();
2418 }
2419
2420 /// The result of a subview is always a memref.
2421 MemRefType getType() { return getResult().getType().cast<MemRefType>(); }
2422
2423 /// A subview result type can be fully inferred from the source type and the
2424 /// static representation of offsets, sizes and strides. Special sentinels
2425 /// encode the dynamic case.
2426 static Type inferResultType(MemRefType sourceMemRefType,
2427 ArrayRef<int64_t> staticOffsets,
2428 ArrayRef<int64_t> staticSizes,
2429 ArrayRef<int64_t> staticStrides);
2430 static Type inferResultType(MemRefType sourceMemRefType,
2431 ArrayRef<OpFoldResult> staticOffsets,
2432 ArrayRef<OpFoldResult> staticSizes,
2433 ArrayRef<OpFoldResult> staticStrides);
2434 static Type inferRankReducedResultType(unsigned resultRank,
2435 MemRefType sourceMemRefType,
2436 ArrayRef<int64_t> staticOffsets,
2437 ArrayRef<int64_t> staticSizes,
2438 ArrayRef<int64_t> staticStrides);
2439 static Type inferRankReducedResultType(unsigned resultRank,
2440 MemRefType sourceMemRefType,
2441 ArrayRef<OpFoldResult> staticOffsets,
2442 ArrayRef<OpFoldResult> staticSizes,
2443 ArrayRef<OpFoldResult> staticStrides);
2444
2445 /// Return the expected rank of each of the`static_offsets`, `static_sizes`
2446 /// and `static_strides` attributes.
2447 std::array<unsigned, 3> getArrayAttrMaxRanks() {
2448 unsigned rank = getSourceType().getRank();
2449 return {rank, rank, rank};
2450 }
2451
2452 /// Return the number of leading operands before the `offsets`, `sizes` and
2453 /// and `strides` operands.
2454 static unsigned getOffsetSizeAndStrideStartOperandIndex() { return 1; }
2455
2456 /// Return the dimensions of the source type that are dropped when
2457 /// the result is rank-reduced.
2458 llvm::SmallDenseSet<unsigned> getDroppedDims();
2459};
2460} // namespace memref
2461} // namespace mlir
2462DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::SubViewOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::SubViewOp>(); } }
2463
2464namespace mlir {
2465namespace memref {
2466
2467//===----------------------------------------------------------------------===//
2468// ::mlir::memref::TensorStoreOp declarations
2469//===----------------------------------------------------------------------===//
2470
2471class TensorStoreOpAdaptor {
2472public:
2473 TensorStoreOpAdaptor(::mlir::ValueRange values, ::mlir::DictionaryAttr attrs = nullptr, ::mlir::RegionRange regions = {});
2474
2475 TensorStoreOpAdaptor(TensorStoreOp &op);
2476
2477 ::mlir::ValueRange getOperands();
2478 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
2479 ::mlir::ValueRange getODSOperands(unsigned index);
2480 ::mlir::Value tensor();
2481 ::mlir::Value memref();
2482 ::mlir::DictionaryAttr getAttributes();
2483 ::mlir::LogicalResult verify(::mlir::Location loc);
2484private:
2485 ::mlir::ValueRange odsOperands;
2486 ::mlir::DictionaryAttr odsAttrs;
2487 ::mlir::RegionRange odsRegions;
2488};
2489class TensorStoreOp : public ::mlir::Op<TensorStoreOp, ::mlir::OpTrait::ZeroRegion, ::mlir::OpTrait::ZeroResult, ::mlir::OpTrait::ZeroSuccessor, ::mlir::OpTrait::NOperands<2>::Impl, ::mlir::OpTrait::SameOperandsShape, ::mlir::OpTrait::SameOperandsElementType, ::mlir::MemoryEffectOpInterface::Trait> {
2490public:
2491 using Op::Op;
2492 using Op::print;
2493 using Adaptor = TensorStoreOpAdaptor;
2494public:
2495 static ::llvm::ArrayRef<::llvm::StringRef> getAttributeNames() {
2496 return {};
2497 }
2498
2499 static constexpr ::llvm::StringLiteral getOperationName() {
2500 return ::llvm::StringLiteral("memref.tensor_store");
2501 }
2502
2503 std::pair<unsigned, unsigned> getODSOperandIndexAndLength(unsigned index);
2504 ::mlir::Operation::operand_range getODSOperands(unsigned index);
2505 ::mlir::Value tensor();
2506 ::mlir::Value memref();
2507 ::mlir::MutableOperandRange tensorMutable();
2508 ::mlir::MutableOperandRange memrefMutable();
2509 std::pair<unsigned, unsigned> getODSResultIndexAndLength(unsigned index);
2510 ::mlir::Operation::result_range getODSResults(unsigned index);
2511 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::Value tensor, ::mlir::Value memref);
2512 static void build(::mlir::OpBuilder &odsBuilder, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::Value tensor, ::mlir::Value memref);
2513 static void build(::mlir::OpBuilder &, ::mlir::OperationState &odsState, ::mlir::TypeRange resultTypes, ::mlir::ValueRange operands, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes = {});
2514 ::mlir::LogicalResult verify();
2515 static ::mlir::ParseResult parse(::mlir::OpAsmParser &parser, ::mlir::OperationState &result);
2516 void print(::mlir::OpAsmPrinter &_odsPrinter);
2517 void getEffects(::mlir::SmallVectorImpl<::mlir::SideEffects::EffectInstance<::mlir::MemoryEffects::Effect>> &effects);
2518public:
2519};
2520} // namespace memref
2521} // namespace mlir
2522DECLARE_EXPLICIT_TYPE_ID(::mlir::memref::TensorStoreOp)namespace mlir { namespace detail { template <> __attribute__
((visibility("default"))) TypeID TypeIDExported::get< ::mlir
::memref::TensorStoreOp>(); } }
2523
2524
2525#endif // GET_OP_CLASSES
2526

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/mlir/include/mlir/IR/Value.h

1//===- Value.h - Base of the SSA Value hierarchy ----------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines generic Value type and manipulation utilities.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef MLIR_IR_VALUE_H
14#define MLIR_IR_VALUE_H
15
16#include "mlir/IR/Types.h"
17#include "mlir/IR/UseDefLists.h"
18#include "mlir/Support/LLVM.h"
19#include "llvm/Support/PointerLikeTypeTraits.h"
20
21namespace mlir {
22class AsmState;
23class Block;
24class BlockArgument;
25class Operation;
26class OpOperand;
27class OpResult;
28class Region;
29class Value;
30
31//===----------------------------------------------------------------------===//
32// Value
33//===----------------------------------------------------------------------===//
34
35namespace detail {
36
37/// The base class for all derived Value classes. It contains all of the
38/// components that are shared across Value classes.
39class alignas(8) ValueImpl : public IRObjectWithUseList<OpOperand> {
40public:
41 /// The enumeration represents the various different kinds of values the
42 /// internal representation may take. We use all of the bits from Type that we
43 /// can to store indices inline.
44 enum class Kind {
45 /// The first N kinds are all inline operation results. An inline operation
46 /// result means that the kind represents the result number. This removes
47 /// the need to store an additional index value. The derived class here is
48 /// an `OpResultImpl`.
49 InlineOpResult = 0,
50
51 /// The next kind represents a 'out-of-line' operation result. This is for
52 /// results with numbers larger than we can represent inline. The derived
53 /// class here is an `OpResultImpl`.
54 OutOfLineOpResult = 6,
55
56 /// The last kind represents a block argument. The derived class here is an
57 /// `BlockArgumentImpl`.
58 BlockArgument = 7
59 };
60
61 /// Return the type of this value.
62 Type getType() const { return typeAndKind.getPointer(); }
63
64 /// Set the type of this value.
65 void setType(Type type) { return typeAndKind.setPointer(type); }
66
67 /// Return the kind of this value.
68 Kind getKind() const { return typeAndKind.getInt(); }
69
70protected:
71 ValueImpl(Type type, Kind kind) : typeAndKind(type, kind) {}
72
73 /// The type of this result and the kind.
74 llvm::PointerIntPair<Type, 3, Kind> typeAndKind;
75};
76} // namespace detail
77
78/// This class represents an instance of an SSA value in the MLIR system,
79/// representing a computable value that has a type and a set of users. An SSA
80/// value is either a BlockArgument or the result of an operation. Note: This
81/// class has value-type semantics and is just a simple wrapper around a
82/// ValueImpl that is either owner by a block(in the case of a BlockArgument) or
83/// an Operation(in the case of an OpResult).
84class Value {
85public:
86 constexpr Value(detail::ValueImpl *impl = nullptr) : impl(impl) {}
25
Null pointer value stored to field 'impl'
87
88 template <typename U>
89 bool isa() const {
90 assert(*this && "isa<> used on a null type.")(static_cast <bool> (*this && "isa<> used on a null type."
) ? void (0) : __assert_fail ("*this && \"isa<> used on a null type.\""
, "mlir/include/mlir/IR/Value.h", 90, __extension__ __PRETTY_FUNCTION__
))
;
91 return U::classof(*this);
92 }
93
94 template <typename First, typename Second, typename... Rest>
95 bool isa() const {
96 return isa<First>() || isa<Second, Rest...>();
97 }
98 template <typename U>
99 U dyn_cast() const {
100 return isa<U>() ? U(impl) : U(nullptr);
101 }
102 template <typename U>
103 U dyn_cast_or_null() const {
104 return (*this && isa<U>()) ? U(impl) : U(nullptr);
105 }
106 template <typename U>
107 U cast() const {
108 assert(isa<U>())(static_cast <bool> (isa<U>()) ? void (0) : __assert_fail
("isa<U>()", "mlir/include/mlir/IR/Value.h", 108, __extension__
__PRETTY_FUNCTION__))
;
109 return U(impl);
110 }
111
112 explicit operator bool() const { return impl; }
113 bool operator==(const Value &other) const { return impl == other.impl; }
114 bool operator!=(const Value &other) const { return !(*this == other); }
115
116 /// Return the type of this value.
117 Type getType() const { return impl->getType(); }
29
Called C++ object pointer is null
118
119 /// Utility to get the associated MLIRContext that this value is defined in.
120 MLIRContext *getContext() const { return getType().getContext(); }
121
122 /// Mutate the type of this Value to be of the specified type.
123 ///
124 /// Note that this is an extremely dangerous operation which can create
125 /// completely invalid IR very easily. It is strongly recommended that you
126 /// recreate IR objects with the right types instead of mutating them in
127 /// place.
128 void setType(Type newType) { impl->setType(newType); }
129
130 /// If this value is the result of an operation, return the operation that
131 /// defines it.
132 Operation *getDefiningOp() const;
133
134 /// If this value is the result of an operation of type OpTy, return the
135 /// operation that defines it.
136 template <typename OpTy>
137 OpTy getDefiningOp() const {
138 return llvm::dyn_cast_or_null<OpTy>(getDefiningOp());
139 }
140
141 /// Return the location of this value.
142 Location getLoc() const;
143 void setLoc(Location loc);
144
145 /// Return the Region in which this Value is defined.
146 Region *getParentRegion();
147
148 /// Return the Block in which this Value is defined.
149 Block *getParentBlock();
150
151 //===--------------------------------------------------------------------===//
152 // UseLists
153 //===--------------------------------------------------------------------===//
154
155 /// Drop all uses of this object from their respective owners.
156 void dropAllUses() const { return impl->dropAllUses(); }
157
158 /// Replace all uses of 'this' value with the new value, updating anything in
159 /// the IR that uses 'this' to use the other value instead. When this returns
160 /// there are zero uses of 'this'.
161 void replaceAllUsesWith(Value newValue) const {
162 impl->replaceAllUsesWith(newValue);
163 }
164
165 /// Replace all uses of 'this' value with 'newValue', updating anything in the
166 /// IR that uses 'this' to use the other value instead except if the user is
167 /// listed in 'exceptions' .
168 void
169 replaceAllUsesExcept(Value newValue,
170 const SmallPtrSetImpl<Operation *> &exceptions) const;
171
172 /// Replace all uses of 'this' value with 'newValue', updating anything in the
173 /// IR that uses 'this' to use the other value instead except if the user is
174 /// 'exceptedUser'.
175 void replaceAllUsesExcept(Value newValue, Operation *exceptedUser) const;
176
177 /// Replace all uses of 'this' value with 'newValue' if the given callback
178 /// returns true.
179 void replaceUsesWithIf(Value newValue,
180 function_ref<bool(OpOperand &)> shouldReplace);
181
182 /// Returns true if the value is used outside of the given block.
183 bool isUsedOutsideOfBlock(Block *block);
184
185 //===--------------------------------------------------------------------===//
186 // Uses
187
188 /// This class implements an iterator over the uses of a value.
189 using use_iterator = ValueUseIterator<OpOperand>;
190 using use_range = iterator_range<use_iterator>;
191
192 use_iterator use_begin() const { return impl->use_begin(); }
193 use_iterator use_end() const { return use_iterator(); }
194
195 /// Returns a range of all uses, which is useful for iterating over all uses.
196 use_range getUses() const { return {use_begin(), use_end()}; }
197
198 /// Returns true if this value has exactly one use.
199 bool hasOneUse() const { return impl->hasOneUse(); }
200
201 /// Returns true if this value has no uses.
202 bool use_empty() const { return impl->use_empty(); }
203
204 //===--------------------------------------------------------------------===//
205 // Users
206
207 using user_iterator = ValueUserIterator<use_iterator, OpOperand>;
208 using user_range = iterator_range<user_iterator>;
209
210 user_iterator user_begin() const { return use_begin(); }
211 user_iterator user_end() const { return use_end(); }
212 user_range getUsers() const { return {user_begin(), user_end()}; }
213
214 //===--------------------------------------------------------------------===//
215 // Utilities
216
217 void print(raw_ostream &os);
218 void print(raw_ostream &os, AsmState &state);
219 void dump();
220
221 /// Print this value as if it were an operand.
222 void printAsOperand(raw_ostream &os, AsmState &state);
223
224 /// Methods for supporting PointerLikeTypeTraits.
225 void *getAsOpaquePointer() const { return impl; }
226 static Value getFromOpaquePointer(const void *pointer) {
227 return reinterpret_cast<detail::ValueImpl *>(const_cast<void *>(pointer));
228 }
229 detail::ValueImpl *getImpl() const { return impl; }
230
231 friend ::llvm::hash_code hash_value(Value arg);
232
233protected:
234 /// A pointer to the internal implementation of the value.
235 detail::ValueImpl *impl;
236};
237
238inline raw_ostream &operator<<(raw_ostream &os, Value value) {
239 value.print(os);
240 return os;
241}
242
243//===----------------------------------------------------------------------===//
244// OpOperand
245//===----------------------------------------------------------------------===//
246
247/// This class represents an operand of an operation. Instances of this class
248/// contain a reference to a specific `Value`.
249class OpOperand : public IROperand<OpOperand, Value> {
250public:
251 /// Provide the use list that is attached to the given value.
252 static IRObjectWithUseList<OpOperand> *getUseList(Value value) {
253 return value.getImpl();
254 }
255
256 /// Return which operand this is in the OpOperand list of the Operation.
257 unsigned getOperandNumber();
258
259private:
260 /// Keep the constructor private and accessible to the OperandStorage class
261 /// only to avoid hard-to-debug typo/programming mistakes.
262 friend class OperandStorage;
263 using IROperand<OpOperand, Value>::IROperand;
264};
265
266//===----------------------------------------------------------------------===//
267// BlockArgument
268//===----------------------------------------------------------------------===//
269
270namespace detail {
271/// The internal implementation of a BlockArgument.
272class BlockArgumentImpl : public ValueImpl {
273public:
274 static bool classof(const ValueImpl *value) {
275 return value->getKind() == ValueImpl::Kind::BlockArgument;
276 }
277
278private:
279 BlockArgumentImpl(Type type, Block *owner, int64_t index, Location loc)
280 : ValueImpl(type, Kind::BlockArgument), owner(owner), index(index),
281 loc(loc) {}
282
283 /// The owner of this argument.
284 Block *owner;
285
286 /// The position in the argument list.
287 int64_t index;
288
289 /// The source location of this argument.
290 Location loc;
291
292 /// Allow access to owner and constructor.
293 friend BlockArgument;
294};
295} // namespace detail
296
297/// This class represents an argument of a Block.
298class BlockArgument : public Value {
299public:
300 using Value::Value;
301
302 static bool classof(Value value) {
303 return llvm::isa<detail::BlockArgumentImpl>(value.getImpl());
304 }
305
306 /// Returns the block that owns this argument.
307 Block *getOwner() const { return getImpl()->owner; }
308
309 /// Returns the number of this argument.
310 unsigned getArgNumber() const { return getImpl()->index; }
311
312 /// Return the location for this argument.
313 Location getLoc() const { return getImpl()->loc; }
314 void setLoc(Location loc) { getImpl()->loc = loc; }
315
316private:
317 /// Allocate a new argument with the given type and owner.
318 static BlockArgument create(Type type, Block *owner, int64_t index,
319 Location loc) {
320 return new detail::BlockArgumentImpl(type, owner, index, loc);
321 }
322
323 /// Destroy and deallocate this argument.
324 void destroy() { delete getImpl(); }
325
326 /// Get a raw pointer to the internal implementation.
327 detail::BlockArgumentImpl *getImpl() const {
328 return reinterpret_cast<detail::BlockArgumentImpl *>(impl);
329 }
330
331 /// Cache the position in the block argument list.
332 void setArgNumber(int64_t index) { getImpl()->index = index; }
333
334 /// Allow access to `create`, `destroy` and `setArgNumber`.
335 friend Block;
336
337 /// Allow access to 'getImpl'.
338 friend Value;
339};
340
341//===----------------------------------------------------------------------===//
342// OpResult
343//===----------------------------------------------------------------------===//
344
345namespace detail {
346/// This class provides the implementation for an operation result.
347class alignas(8) OpResultImpl : public ValueImpl {
348public:
349 using ValueImpl::ValueImpl;
350
351 static bool classof(const ValueImpl *value) {
352 return value->getKind() != ValueImpl::Kind::BlockArgument;
353 }
354
355 /// Returns the parent operation of this result.
356 Operation *getOwner() const;
357
358 /// Returns the result number of this op result.
359 unsigned getResultNumber() const;
360
361 /// Returns the next operation result at `offset` after this result. This
362 /// method is useful when indexing the result storage of an operation, given
363 /// that there is more than one kind of operation result (with the different
364 /// kinds having different sizes) and that operations are stored in reverse
365 /// order.
366 OpResultImpl *getNextResultAtOffset(intptr_t offset);
367
368 /// Returns the maximum number of results that can be stored inline.
369 static unsigned getMaxInlineResults() {
370 return static_cast<unsigned>(Kind::OutOfLineOpResult);
371 }
372};
373
374/// This class provides the implementation for an operation result whose index
375/// can be represented "inline" in the underlying ValueImpl.
376struct InlineOpResult : public OpResultImpl {
377public:
378 InlineOpResult(Type type, unsigned resultNo)
379 : OpResultImpl(type, static_cast<ValueImpl::Kind>(resultNo)) {
380 assert(resultNo < getMaxInlineResults())(static_cast <bool> (resultNo < getMaxInlineResults(
)) ? void (0) : __assert_fail ("resultNo < getMaxInlineResults()"
, "mlir/include/mlir/IR/Value.h", 380, __extension__ __PRETTY_FUNCTION__
))
;
381 }
382
383 /// Return the result number of this op result.
384 unsigned getResultNumber() const { return static_cast<unsigned>(getKind()); }
385
386 static bool classof(const OpResultImpl *value) {
387 return value->getKind() != ValueImpl::Kind::OutOfLineOpResult;
388 }
389};
390
391/// This class provides the implementation for an operation result whose index
392/// cannot be represented "inline", and thus requires an additional index field.
393class OutOfLineOpResult : public OpResultImpl {
394public:
395 OutOfLineOpResult(Type type, uint64_t outOfLineIndex)
396 : OpResultImpl(type, Kind::OutOfLineOpResult),
397 outOfLineIndex(outOfLineIndex) {}
398
399 static bool classof(const OpResultImpl *value) {
400 return value->getKind() == ValueImpl::Kind::OutOfLineOpResult;
401 }
402
403 /// Return the result number of this op result.
404 unsigned getResultNumber() const {
405 return outOfLineIndex + getMaxInlineResults();
406 }
407
408 /// The trailing result number, or the offset from the beginning of the
409 /// `OutOfLineOpResult` array.
410 uint64_t outOfLineIndex;
411};
412
413/// Return the result number of this op result.
414inline unsigned OpResultImpl::getResultNumber() const {
415 if (const auto *outOfLineResult = dyn_cast<OutOfLineOpResult>(this))
416 return outOfLineResult->getResultNumber();
417 return cast<InlineOpResult>(this)->getResultNumber();
418}
419
420} // namespace detail
421
422/// This is a value defined by a result of an operation.
423class OpResult : public Value {
424public:
425 using Value::Value;
426
427 static bool classof(Value value) {
428 return llvm::isa<detail::OpResultImpl>(value.getImpl());
429 }
430
431 /// Returns the operation that owns this result.
432 Operation *getOwner() const { return getImpl()->getOwner(); }
433
434 /// Returns the number of this result.
435 unsigned getResultNumber() const { return getImpl()->getResultNumber(); }
436
437private:
438 /// Get a raw pointer to the internal implementation.
439 detail::OpResultImpl *getImpl() const {
440 return reinterpret_cast<detail::OpResultImpl *>(impl);
441 }
442
443 /// Given a number of operation results, returns the number that need to be
444 /// stored inline.
445 static unsigned getNumInline(unsigned numResults);
446
447 /// Given a number of operation results, returns the number that need to be
448 /// stored as trailing.
449 static unsigned getNumTrailing(unsigned numResults);
450
451 /// Allow access to constructor.
452 friend Operation;
453};
454
455/// Make Value hashable.
456inline ::llvm::hash_code hash_value(Value arg) {
457 return ::llvm::hash_value(arg.getImpl());
458}
459
460} // namespace mlir
461
462namespace llvm {
463
464template <>
465struct DenseMapInfo<mlir::Value> {
466 static mlir::Value getEmptyKey() {
467 void *pointer = llvm::DenseMapInfo<void *>::getEmptyKey();
468 return mlir::Value::getFromOpaquePointer(pointer);
469 }
470 static mlir::Value getTombstoneKey() {
471 void *pointer = llvm::DenseMapInfo<void *>::getTombstoneKey();
472 return mlir::Value::getFromOpaquePointer(pointer);
473 }
474 static unsigned getHashValue(mlir::Value val) {
475 return mlir::hash_value(val);
476 }
477 static bool isEqual(mlir::Value lhs, mlir::Value rhs) { return lhs == rhs; }
478};
479template <>
480struct DenseMapInfo<mlir::BlockArgument> : public DenseMapInfo<mlir::Value> {
481 static mlir::BlockArgument getEmptyKey() {
482 void *pointer = llvm::DenseMapInfo<void *>::getEmptyKey();
483 return reinterpret_cast<mlir::detail::BlockArgumentImpl *>(pointer);
484 }
485 static mlir::BlockArgument getTombstoneKey() {
486 void *pointer = llvm::DenseMapInfo<void *>::getTombstoneKey();
487 return reinterpret_cast<mlir::detail::BlockArgumentImpl *>(pointer);
488 }
489};
490template <>
491struct DenseMapInfo<mlir::OpResult> : public DenseMapInfo<mlir::Value> {
492 static mlir::OpResult getEmptyKey() {
493 void *pointer = llvm::DenseMapInfo<void *>::getEmptyKey();
494 return reinterpret_cast<mlir::detail::OpResultImpl *>(pointer);
495 }
496 static mlir::OpResult getTombstoneKey() {
497 void *pointer = llvm::DenseMapInfo<void *>::getTombstoneKey();
498 return reinterpret_cast<mlir::detail::OpResultImpl *>(pointer);
499 }
500};
501
502/// Allow stealing the low bits of a value.
503template <>
504struct PointerLikeTypeTraits<mlir::Value> {
505public:
506 static inline void *getAsVoidPointer(mlir::Value value) {
507 return const_cast<void *>(value.getAsOpaquePointer());
508 }
509 static inline mlir::Value getFromVoidPointer(void *pointer) {
510 return mlir::Value::getFromOpaquePointer(pointer);
511 }
512 enum {
513 NumLowBitsAvailable =
514 PointerLikeTypeTraits<mlir::detail::ValueImpl *>::NumLowBitsAvailable
515 };
516};
517template <>
518struct PointerLikeTypeTraits<mlir::BlockArgument>
519 : public PointerLikeTypeTraits<mlir::Value> {
520public:
521 static inline mlir::BlockArgument getFromVoidPointer(void *pointer) {
522 return reinterpret_cast<mlir::detail::BlockArgumentImpl *>(pointer);
523 }
524};
525template <>
526struct PointerLikeTypeTraits<mlir::OpResult>
527 : public PointerLikeTypeTraits<mlir::Value> {
528public:
529 static inline mlir::OpResult getFromVoidPointer(void *pointer) {
530 return reinterpret_cast<mlir::detail::OpResultImpl *>(pointer);
531 }
532};
533
534} // namespace llvm
535
536#endif