// Return the total number of vector lanes, i.e., the product of all the
// dimension sizes of the vector type.
assert(type.getRank() > 0 && "Cannot handle rank-0 vectors");
auto vShape = type.getShape();
assert(llvm::all_of(vShape, [](int64_t dim) { return dim > 0; }) &&
       "Vector dimensions cannot be dynamic");
return std::accumulate(vShape.begin(), vShape.end(), 1,
                       std::multiplies<int64_t>());
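
// Illustrative sketch, not part of the original source: assuming the
// computation above is wrapped in a helper -- here hypothetically named
// `getVectorLaneSize(mlir::VectorType)` -- a vector<4x8xi16> has 4 * 8 = 32
// lanes:
//
//   mlir::MLIRContext ctx;
//   auto i16Ty = mlir::IntegerType::get(&ctx, 16);
//   auto vecTy = mlir::VectorType::get({4, 8}, i16Ty);
//   unsigned lanes = getVectorLaneSize(vecTy); // == 32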

// Given the vector type of an operation's source operand, compute the vector
// type of its accumulator/destination.
mlir::Type stype = type.getElementType();

if (auto itype = llvm::dyn_cast<mlir::IntegerType>(stype)) {
  assert(itype.getWidth() <= 64);
  // Integer lanes widen into the target's accumulator lanes: 32/64 bits on
  // AIE2 and 48/80 bits on AIE1 (e.g. vector<32xi16> -> vector<32xi32> on
  // AIE2, vector<32xi48> on AIE1).
  unsigned width;
  if (AIE2)
    width = itype.getWidth() <= 16 ? 32 : 64;
  else
    width = itype.getWidth() <= 16 ? 48 : 80;

  mlir::Type ctype = mlir::IntegerType::get(itype.getContext(), width);
  return mlir::VectorType::get(type.getShape(), ctype);
}

if (auto ftype = llvm::dyn_cast<mlir::FloatType>(stype)) {
  // On AIE2, 16-bit floating-point lanes accumulate into f32 lanes.
  if (AIE2 && ftype.getWidth() == 16)
    return mlir::VectorType::get(type.getShape(),
                                 mlir::Float32Type::get(ftype.getContext()));
  // Other floating-point element types are kept as is.
  return type;
}

llvm::report_fatal_error("Unsupported destination type");

// Flatten a multi-dimensional access into a single strided affine expression:
// the innermost dimension gets stride 1, and each outer dimension's stride is
// the running product of the inner dimension sizes. A dynamic size makes all
// strides outside of it symbolic.
static mlir::AffineExpr
flattenedStridedExpr(llvm::ArrayRef<int64_t> sizes,
                     llvm::ArrayRef<mlir::AffineExpr> exprs,
                     mlir::MLIRContext *context) {
  // Expect non-empty sizes and exprs.
  if (sizes.empty() || exprs.empty())
    return nullptr;

  if (is_contained(sizes, 0))
    return getAffineConstantExpr(0, context);

  auto maps = mlir::AffineMap::inferFromExprList(exprs, context);
  if (maps.empty())
    return nullptr;

  unsigned nSymbols = maps[0].getNumSymbols();

  mlir::AffineExpr expr;
  bool dynamicPoisonBit = false;
  int64_t runningSize = 1;
  // Walk the dimensions innermost-first, accumulating each stride.
  for (auto en : zip(reverse(exprs), reverse(sizes))) {
    int64_t size = std::get<1>(en);
    mlir::AffineExpr dimExpr = std::get<0>(en);
    mlir::AffineExpr stride = dynamicPoisonBit
                                  ? getAffineSymbolExpr(nSymbols++, context)
                                  : getAffineConstantExpr(runningSize, context);
    expr = expr ? expr + dimExpr * stride : dimExpr * stride;
    if (size > 0) {
      runningSize *= size;
      // Bail out if the running stride overflows.
      if (runningSize <= 0)
        return nullptr;
    } else {
      // Dynamic size: outer strides can no longer be constant.
      dynamicPoisonBit = true;
    }
  }
  return simplifyAffineExpr(expr, maps[0].getNumDims(), nSymbols);
}
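
// Worked example, not part of the original source: for a row-major
// memref<64x256xf32> accessed at (d0, d1), sizes = {64, 256} and
// exprs = {d0, d1}, so the flattened expression is d0 * 256 + d1:
//
//   mlir::MLIRContext ctx;
//   llvm::SmallVector<mlir::AffineExpr, 2> exprs = {
//       mlir::getAffineDimExpr(0, &ctx), mlir::getAffineDimExpr(1, &ctx)};
//   mlir::AffineExpr flat =
//       flattenedStridedExpr({64, 256}, exprs, &ctx); // d0 * 256 + d1
//
// If a size entry is dynamic (a negative sentinel), every stride outside of
// that dimension becomes a fresh symbol, e.g. sizes = {64, dyn, 4} with
// exprs = {d0, d1, d2} yields an expression equivalent to
// d0 * s0 + d1 * 4 + d2.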

// Given an aievec.upd operation `updOp`, construct the linearized affine
// expression for its memref access: loop indices become affine dims,
// constant indices become affine constants, and any affine.apply feeding an
// index is folded in.
auto memRefType = llvm::cast<mlir::MemRefType>(updOp.getSource().getType());
mlir::MLIRContext *context = memRefType.getContext();

llvm::SmallVector<mlir::AffineExpr, 8> exprVec;
llvm::SmallDenseMap<mlir::Value, mlir::AffineExpr, 8> indexToExprDimMap;
for (auto idxAndValue : llvm::enumerate(updOp.getIndices())) {
  auto value = idxAndValue.value();
  if (auto apOf = value.getDefiningOp<mlir::affine::AffineApplyOp>()) {
    mlir::AffineMap map = apOf.getAffineMap();
    // Cannot linearize a multi-result index computation.
    if (map.getNumResults() != 1)
      return nullptr;

    // Substitute the apply's operands into its map: constants become affine
    // constants; any other operand gets a (deduplicated) affine dim.
    llvm::SmallVector<mlir::AffineExpr, 4> indexExprs;
    for (auto index : apOf.getMapOperands())
      if (auto cIdx = index.getDefiningOp<mlir::arith::ConstantOp>()) {
        auto idxVal = llvm::cast<mlir::IntegerAttr>(cIdx.getValue()).getValue();
        unsigned idx = idxVal.getSExtValue();
        indexExprs.push_back(getAffineConstantExpr(idx, context));
      } else {
        if (!indexToExprDimMap.count(index))
          indexToExprDimMap[index] =
              getAffineDimExpr(indexToExprDimMap.size(), context);
        indexExprs.push_back(indexToExprDimMap[index]);
      }

    exprVec.push_back(map.getResult(0).replaceDims(indexExprs));
  } else if (auto cOp = value.getDefiningOp<mlir::arith::ConstantOp>()) {
    // A constant index contributes a constant expression.
    auto idxVal = llvm::cast<mlir::IntegerAttr>(cOp.getValue()).getValue();
    unsigned idx = idxVal.getSExtValue();
    exprVec.push_back(getAffineConstantExpr(idx, context));
  } else {
    // Any other SSA index gets a (deduplicated) affine dim.
    if (!indexToExprDimMap.count(value))
      indexToExprDimMap[value] =
          getAffineDimExpr(indexToExprDimMap.size(), context);
    exprVec.push_back(indexToExprDimMap[value]);
  }
}

// Flatten the per-dimension index expressions over the memref's shape.
return flattenedStridedExpr(memRefType.getShape(), exprVec,
                            memRefType.getContext());
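
// Worked example, not part of the original source: for an upd-style read of
// memref<64x256xi16> with indices (%i, %c8), where %c8 is an arith.constant
// of value 8, %i becomes affine dim d0 and %c8 becomes the constant 8, so
// exprVec = {d0, 8} and the flattened access expression over the 64x256
// shape is d0 * 256 + 8. If the second index instead came from
// affine.apply affine_map<(d0) -> (d0 + 8)>(%j), the operand %j would get
// dim d1 and the contribution would be d1 + 8, giving d0 * 256 + d1 + 8.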

// Split a linearized access expression into a (possibly null) non-constant
// base and a constant offset. E.g., for an access A[i][j + 2] into an NxN
// array, the linearized expression i * N + j + 2 splits into the base
// i * N + j and the offset 2.
mlir::AffineExpr base = expr;
int32_t offset = 0;

if (auto constExpr = llvm::dyn_cast<mlir::AffineConstantExpr>(expr)) {
  // The whole expression is a constant: no base, only an offset.
  base = nullptr;
  offset += constExpr.getValue();
} else if (auto binopExpr = llvm::dyn_cast<mlir::AffineBinaryOpExpr>(expr)) {
  if (binopExpr.getKind() == mlir::AffineExprKind::Add) {
    // For an addition, peel a constant summand off either side.
    mlir::AffineExpr lhs = binopExpr.getLHS(), rhs = binopExpr.getRHS();
    if (auto constExpr = llvm::dyn_cast<mlir::AffineConstantExpr>(lhs)) {
      base = rhs;
      offset += constExpr.getValue();
    }
    if (auto constExpr = llvm::dyn_cast<mlir::AffineConstantExpr>(rhs)) {
      base = base == rhs ? nullptr : lhs;
      offset += constExpr.getValue();
    }
  }
}
return std::make_pair(base, offset);
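
// Illustrative sketch, not part of the original source: splitting the kind of
// linearized expression produced above into a base and an immediate offset:
//
//   mlir::MLIRContext ctx;
//   mlir::AffineExpr d0 = mlir::getAffineDimExpr(0, &ctx);
//   mlir::AffineExpr d1 = mlir::getAffineDimExpr(1, &ctx);
//   mlir::AffineExpr expr = d0 * 256 + d1 + 2;
//
// The logic above yields base = d0 * 256 + d1 and offset = 2; a purely
// constant expression yields a null base with the offset carrying its value.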