// MLIR-AIE — Utils.cpp (Doxygen source listing)
//===- Utils.cpp - Utilities to support AIE vectorization -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// (c) Copyright 2023, Advanced Micro Devices, Inc.
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for the AIEVec dialect
//
//===----------------------------------------------------------------------===//
14
16#include "mlir/Dialect/Affine/IR/AffineOps.h"
17#include "mlir/Dialect/Arith/IR/Arith.h"
18#include "mlir/Dialect/Vector/IR/VectorOps.h"
19#include "llvm/ADT/TypeSwitch.h"
20#include <numeric>
21
22#define DEBUG_TYPE "aievec-utils"
23
24using namespace mlir;
25
26namespace xilinx::aievec {
27
// Returns the compile-time value that `idx` is known to take at the start of
// execution, if one can be determined; std::nullopt otherwise. Three cases
// are handled:
//  - `idx` is the induction variable of an `affine.for` with a constant
//    lower bound: returns that lower bound.
//  - `idx` is produced by an `arith.constant`: returns the constant value.
//  - `idx` is produced by a single-result `affine.apply`: returns the map's
//    result evaluated on the (recursively computed) lower bounds of its
//    operands.
// Block-argument operands of an `affine.apply` whose value cannot be
// resolved are assumed to contribute 0 — see the XXX notes below.
static std::optional<int64_t> getLowerBoundValue(Value idx) {
  if (auto blkArg = dyn_cast<BlockArgument>(idx)) {
    // A block argument has no defining op; the only block argument we can
    // evaluate is the induction variable of an enclosing affine.for.
    auto parentOp = blkArg.getOwner()->getParentOp();
    return TypeSwitch<Operation *, std::optional<int64_t>>(parentOp)
        .Case<affine::AffineForOp>([&blkArg](affine::AffineForOp forOp) {
          if (forOp.getInductionVar() == blkArg &&
              forOp.hasConstantLowerBound())
            return std::optional<int64_t>(forOp.getConstantLowerBound());
          // If it's an iteration argument or the lower bound is an
          // affine expression.
          // TODO: Compute the value of the lower bound affine expression
          // TODO: if it's constant.
          return std::optional<int64_t>();
        })
        .Default([](auto) { return std::optional<int64_t>(); });
  }
  return TypeSwitch<Operation *, std::optional<int64_t>>(idx.getDefiningOp())
      .Case<arith::ConstantOp>([](auto constantOp) {
        // Direct constant index.
        return std::optional<int64_t>(
            cast<IntegerAttr>(constantOp.getValue()).getInt());
      })
      .Case<affine::AffineApplyOp>([](auto applyOp) {
        // Only single-result affine maps are supported.
        if (applyOp.getAffineMap().getNumResults() == 1) {
          auto affineMap = applyOp.getAffineMap();

          // Check if the map has symbols - if so, try symbol-aware
          // evaluation by substituting constants directly into the affine
          // expression (the compose() path below handles only the
          // symbol-free case).
          if (affineMap.getNumSymbols() > 0) {
            auto operands = applyOp.getMapOperands();
            unsigned numDims = affineMap.getNumDims();

            // Map operands are ordered dims first, then symbols.
            SmallVector<int64_t, 4> dimValues;
            SmallVector<int64_t, 4> symbolValues;

            // Collect dimension values
            for (unsigned i = 0; i < numDims; i++) {
              std::optional<int64_t> lbv = getLowerBoundValue(operands[i]);
              // XXX: Block arguments without a resolvable value are assumed
              // XXX: to contribute 0 (i.e. to be aligned).
              if (!lbv && !isa<BlockArgument>(operands[i]))
                return std::optional<int64_t>();
              dimValues.push_back(lbv.value_or(0L));
            }

            // Collect symbol values
            for (unsigned i = numDims; i < operands.size(); i++) {
              std::optional<int64_t> lbv = getLowerBoundValue(operands[i]);
              if (!lbv && !isa<BlockArgument>(operands[i]))
                return std::optional<int64_t>();
              symbolValues.push_back(lbv.value_or(0L));
            }

            // Manually replace and constant fold
            auto expr = affineMap.getResult(0);
            auto ctx = affineMap.getContext();

            // Convert int64_t values to AffineConstantExpr
            SmallVector<AffineExpr, 4> dimExprs;
            SmallVector<AffineExpr, 4> symbolExprs;
            for (auto val : dimValues)
              dimExprs.push_back(getAffineConstantExpr(val, ctx));
            for (auto val : symbolValues)
              symbolExprs.push_back(getAffineConstantExpr(val, ctx));

            expr = expr.replaceDimsAndSymbols(dimExprs, symbolExprs);
            if (auto constExpr = dyn_cast<AffineConstantExpr>(expr)) {
              return std::optional<int64_t>(constExpr.getValue());
            }

            // If expression couldn't be constant-folded, return nullopt
            return std::optional<int64_t>();
          }

          // Symbol-free map: fold by composing the map with the constant
          // operand values and taking its single result.
          auto operands = applyOp.getMapOperands();
          SmallVector<int64_t, 4> srcIndices;
          for (auto index : operands) {
            std::optional<int64_t> lbv = getLowerBoundValue(index);
            // XXX: We assume block arguments to either have well-defined
            // XXX: compile-time values, or to be aligned.
            if (!lbv && !isa<BlockArgument>(index))
              return std::optional<int64_t>();
            srcIndices.push_back(lbv.value_or(0L));
          }
          return std::optional<int64_t>(affineMap.compose(srcIndices)[0]);
        }
        // Multi-result maps: not supported.
        return std::optional<int64_t>();
      })
      .Default([&](auto) { return std::optional<int64_t>(); });
}
114
115// Return the offset of a given transfer read operation with regards to the
116// specified vector type. If the read is aligned to the specified alignment
117// parameter (in bits), then the offset is 0. Otherwise, the offset is the
118// number of elements past the immediately preceding aligned vector length.
119template <typename TransferReadLikeOp, typename>
120std::optional<int64_t> getTransferReadAlignmentOffset(TransferReadLikeOp readOp,
121 VectorType vType,
122 int64_t alignment) {
123 // TODO: Add support for cases where the index is not comming from an
124 // TODO: `affine.apply` op or when the affine map has more than one
125 // TODO: dimension. We also need to address the case where the index is an
126 // TODO: induction variable.
127 auto innerMostIndex = readOp.getIndices().back();
128 auto vectorLength = vType.getShape().back();
129 std::optional<int64_t> lbv = getLowerBoundValue(innerMostIndex);
130 if (!lbv)
131 return std::nullopt;
132 int64_t vectorLengthAlignmentOffset = lbv.value() % vectorLength;
133 int64_t absoluteAlignmentOffset = alignment / vType.getElementTypeBitWidth();
134 if (vectorLengthAlignmentOffset % absoluteAlignmentOffset)
135 return vectorLengthAlignmentOffset;
136 return 0;
137}
138
// Explicit instantiations for the two transfer-read-like types this utility
// is used with: the `vector.transfer_read` op itself and its adaptor.
template std::optional<int64_t>
getTransferReadAlignmentOffset(vector::TransferReadOp readOp, VectorType vType,
                               int64_t alignment);
template std::optional<int64_t>
getTransferReadAlignmentOffset(vector::TransferReadOp::Adaptor readOp,
                               VectorType vType, int64_t alignment);
145
146VectorType getFlattenedVectorType(VectorType vecTy) {
147 if (vecTy.getRank() == 1)
148 return vecTy;
149 auto shape = vecTy.getShape();
150 return VectorType::get(
151 {std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>())},
152 vecTy.getElementType());
153}
154
155} // namespace xilinx::aievec
// Doxygen cross-reference stubs (generated):
// mlir::VectorType getFlattenedVectorType(mlir::VectorType vecTy)
// std::optional<int64_t> getTransferReadAlignmentOffset(TransferReadLikeOp readOp, mlir::VectorType vType, int64_t alignment)