MLIR-AIE — AIERT.cpp: AIE runtime-control source listing (extracted from generated documentation).
1//===- AIERT.cpp ------------------------------------------------*- C++ -*-===//
2//
3// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7// Copyright (C) 2024, Advanced Micro Devices, Inc. All rights reserved.
8//
9//===----------------------------------------------------------------------===//
10
11#include "aie/Targets/AIERT.h"
13
14#include "mlir/Support/LogicalResult.h"
15
16extern "C" {
17#include "xaiengine/xaie_core.h"
18#include "xaiengine/xaie_dma.h"
19#include "xaiengine/xaie_elfloader.h"
20#include "xaiengine/xaie_interrupt.h"
21#include "xaiengine/xaie_locks.h"
22#include "xaiengine/xaie_mem.h"
23#include "xaiengine/xaie_perfcnt.h"
24#include "xaiengine/xaie_plif.h"
25#include "xaiengine/xaie_reset.h"
26#include "xaiengine/xaie_ss.h"
27#include "xaiengine/xaie_txn.h"
28#include "xaiengine/xaiegbl.h"
29#include "xaiengine/xaiegbl_defs.h"
30}
31
32#include <filesystem>
33
34#define AIERC_STR(x) x, #x
35static const std::map<AieRC, std::string> AIERCTOSTR = {
36 {AIERC_STR(XAIE_OK)},
37 {AIERC_STR(XAIE_ERR)},
38 {AIERC_STR(XAIE_INVALID_DEVICE)},
39 {AIERC_STR(XAIE_INVALID_RANGE)},
40 {AIERC_STR(XAIE_INVALID_ARGS)},
41 {AIERC_STR(XAIE_INVALID_TILE)},
42 {AIERC_STR(XAIE_ERR_STREAM_PORT)},
43 {AIERC_STR(XAIE_INVALID_DMA_TILE)},
44 {AIERC_STR(XAIE_INVALID_BD_NUM)},
45 {AIERC_STR(XAIE_ERR_OUTOFBOUND)},
46 {AIERC_STR(XAIE_INVALID_DATA_MEM_ADDR)},
47 {AIERC_STR(XAIE_INVALID_ELF)},
48 {AIERC_STR(XAIE_CORE_STATUS_TIMEOUT)},
49 {AIERC_STR(XAIE_INVALID_CHANNEL_NUM)},
50 {AIERC_STR(XAIE_INVALID_LOCK)},
51 {AIERC_STR(XAIE_INVALID_DMA_DIRECTION)},
52 {AIERC_STR(XAIE_INVALID_PLIF_WIDTH)},
53 {AIERC_STR(XAIE_INVALID_LOCK_ID)},
54 {AIERC_STR(XAIE_INVALID_LOCK_VALUE)},
55 {AIERC_STR(XAIE_LOCK_RESULT_FAILED)},
56 {AIERC_STR(XAIE_INVALID_DMA_DESC)},
57 {AIERC_STR(XAIE_INVALID_ADDRESS)},
58 {AIERC_STR(XAIE_FEATURE_NOT_SUPPORTED)},
59 {AIERC_STR(XAIE_INVALID_BURST_LENGTH)},
60 {AIERC_STR(XAIE_INVALID_BACKEND)},
61 {AIERC_STR(XAIE_INSUFFICIENT_BUFFER_SIZE)},
62 {AIERC_STR(XAIE_ERR_MAX)}};
63#undef AIERC_STR
64
65static const std::map<xilinx::AIE::WireBundle, StrmSwPortType>
66 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE = {
67 {xilinx::AIE::WireBundle::Core, StrmSwPortType::CORE},
68 {xilinx::AIE::WireBundle::DMA, StrmSwPortType::DMA},
69 {xilinx::AIE::WireBundle::TileControl, StrmSwPortType::CTRL},
70 {xilinx::AIE::WireBundle::FIFO, StrmSwPortType::FIFO},
71 {xilinx::AIE::WireBundle::South, StrmSwPortType::SOUTH},
72 {xilinx::AIE::WireBundle::West, StrmSwPortType::WEST},
73 {xilinx::AIE::WireBundle::North, StrmSwPortType::NORTH},
74 {xilinx::AIE::WireBundle::East, StrmSwPortType::EAST},
75 // missing PLIO from WireBundle
76 // missing NOC from WireBundle
77 {xilinx::AIE::WireBundle::Trace, StrmSwPortType::TRACE},
78};
79
80#ifndef NDEBUG
81
82// https://stackoverflow.com/a/32230306
83template <typename H1>
84llvm::raw_ostream &showAIEXRTArgs(llvm::raw_ostream &out, const char *label,
85 H1 &&value) {
86 return out << label << "=" << std::forward<H1>(value);
87}
88
89template <typename H1, typename... T>
90llvm::raw_ostream &showAIEXRTArgs(llvm::raw_ostream &out, const char *label,
91 H1 &&value, T &&...rest) {
92 const char *pcomma = strchr(label, ',');
93 return showAIEXRTArgs(out.write(label, pcomma - label)
94 << "=" << std::forward<H1>(value) << ',',
95 pcomma + 1, std::forward<T>(rest)...);
96}
97
98llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const XAie_LocType &loc);
99
100llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const XAie_Lock &lock);
101
102llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const XAie_Packet &packet);
103
104#define SHOW_AIERT_ARGS(os, ...) showAIEXRTArgs(os, #__VA_ARGS__, __VA_ARGS__)
105
106// So that we can use the pattern if(auto r = TRY_XAIE_API...) { // r is nonzero
107// }
108static_assert(XAIE_OK == 0);
109
110#define TRY_XAIE_API_FATAL_ERROR(API, ...) \
111 do { \
112 LLVM_DEBUG(llvm::dbgs() << "trying XAIE API: " << #API << " with args: "); \
113 LLVM_DEBUG(SHOW_AIERT_ARGS(llvm::dbgs(), __VA_ARGS__)); \
114 LLVM_DEBUG(llvm::dbgs() << "\n"); \
115 if (auto r = API(__VA_ARGS__)) \
116 llvm::report_fatal_error(llvm::Twine(#API " failed with ") + \
117 AIERCTOSTR.at(r)); \
118 } while (0)
119
120#define TRY_XAIE_API_EMIT_ERROR(OP, API, ...) \
121 do { \
122 LLVM_DEBUG(llvm::dbgs() << "trying XAIE API: " << #API << " with args: "); \
123 LLVM_DEBUG(SHOW_AIERT_ARGS(llvm::dbgs(), __VA_ARGS__)); \
124 LLVM_DEBUG(llvm::dbgs() << "\n"); \
125 if (auto r = API(__VA_ARGS__)) \
126 return OP.emitOpError() << #API " failed with " << AIERCTOSTR.at(r); \
127 } while (0)
128
129#define TRY_XAIE_API_LOGICAL_RESULT(API, ...) \
130 do { \
131 LLVM_DEBUG(llvm::dbgs() << "trying XAIE API: " << #API << " with args: "); \
132 LLVM_DEBUG(SHOW_AIERT_ARGS(llvm::dbgs(), __VA_ARGS__)); \
133 LLVM_DEBUG(llvm::dbgs() << "\n"); \
134 if (auto r = API(__VA_ARGS__)) { \
135 llvm::errs() << #API " failed with " << AIERCTOSTR.at(r); \
136 return failure(); \
137 } \
138 } while (0)
139
140#else
141
142#define TRY_XAIE_API_FATAL_ERROR(API, ...) \
143 do { \
144 if (auto r = API(__VA_ARGS__)) \
145 llvm::report_fatal_error(llvm::Twine(#API " failed with ") + \
146 AIERCTOSTR.at(r)); \
147 } while (0)
148
149#define TRY_XAIE_API_EMIT_ERROR(OP, API, ...) \
150 do { \
151 if (auto r = API(__VA_ARGS__)) \
152 return OP.emitOpError() << #API " failed with " << AIERCTOSTR.at(r); \
153 } while (0)
154
155#define TRY_XAIE_API_LOGICAL_RESULT(API, ...) \
156 do { \
157 if (auto r = API(__VA_ARGS__)) { \
158 llvm::errs() << #API " failed with " << AIERCTOSTR.at(r); \
159 return failure(); \
160 } \
161 } while (0)
162
163#endif
164
165using namespace mlir;
166using namespace xilinx;
167
168#define DEBUG_TYPE "aie-aiert"
169
170llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const XAie_LocType &loc) {
171 os << "XAie_LocType(col: " << std::to_string(loc.Col)
172 << ", row: " << std::to_string(loc.Row) << ")";
173 return os;
174}
175
176llvm::raw_ostream &operator<<(llvm::raw_ostream &os, const XAie_Lock &lock) {
177 os << "XAie_Lock(id: " << std::to_string(lock.LockId)
178 << ", val: " << std::to_string(lock.LockVal) << ")";
179 return os;
180}
181
182llvm::raw_ostream &operator<<(llvm::raw_ostream &os,
183 const XAie_Packet &packet) {
184 os << "XAie_Packet(id: " << std::to_string(packet.PktId)
185 << ", type: " << std::to_string(packet.PktType) << ")";
186 return os;
187}
188
// Device-configuration constants handed to aie-rt (see XAie_Config below).
#define XAIE_BASE_ADDR 0x40000000          // AIE array base address
#define XAIE_SHIM_ROW 0                    // shim tiles occupy row 0
#define XAIE_MEM_TILE_ROW_START 1          // mem tiles start directly above shim
#define XAIE_PARTITION_BASE_ADDR 0x0       // partition-relative base address

#define NPI_ADDR 0x0                       // NPI base address for XAie_UpdateNpiAddr
#define NUM_LOCKS 16                       // locks zero-initialized per core tile
#define EVEN_BD_NUM_START 0                // first even buffer-descriptor id
#define ODD_BD_NUM_START 24                // first odd buffer-descriptor id
198
200 XAie_Config configPtr;
201 XAie_DevInst devInst;
202};
203
205
207 : targetModel(tm), aiert(std::make_unique<AIERtImpl>()) {
208 // The first column in the NPU lacks a shim tile. AIE-RT exposes some of
209 // the internals about how this is modeled in a somewhat awkward way.
210 size_t partitionStartCol =
212 size_t partitionNumCols = tm.columns();
213 size_t deviceRows = tm.rows();
214 size_t deviceCols = tm.columns() + partitionStartCol;
215
216 // Don't put this in the target model, because it's XAIE specific.
217 unsigned char devGen;
218 switch (tm.getTargetArch()) {
219 case AIEArch::AIE1: // probably unreachable.
220 devGen = XAIE_DEV_GEN_AIE;
221 break;
222 case AIEArch::AIE2:
223 // FIXME: What if we don't have an IPU? aie-rt
224 // models non-IPU devices differently.
225 devGen = XAIE_DEV_GEN_AIE2IPU;
226 break;
227 case AIEArch::AIE2p:
228 devGen = XAIE_DEV_GEN_AIE2P_STRIX_B0;
229 break;
230 }
231 aiert->configPtr = XAie_Config{
232 /*AieGen*/ devGen,
233 /*BaseAddr*/ XAIE_BASE_ADDR,
234 /*ColShift*/ static_cast<uint8_t>(tm.getColumnShift()),
235 /*RowShift*/ static_cast<uint8_t>(tm.getRowShift()),
236 /*NumRows*/ static_cast<uint8_t>(deviceRows),
237 /*NumCols*/ static_cast<uint8_t>(deviceCols),
238 /*ShimRowNum*/ XAIE_SHIM_ROW,
239 /*MemTileRowStart*/ XAIE_MEM_TILE_ROW_START,
240 /*MemTileNumRows*/ static_cast<uint8_t>(tm.getNumMemTileRows()),
241 /*AieTileRowStart*/
242 static_cast<uint8_t>(XAIE_MEM_TILE_ROW_START + tm.getNumMemTileRows()),
243 /*AieTileNumRows*/
244 static_cast<uint8_t>(tm.rows() - tm.getNumMemTileRows() - 1),
245 /*PartProp*/ {},
246 /*Backend*/ XAIE_IO_BACKEND_CDO};
247 XAie_InstDeclare(_devInst, &aiert->configPtr);
248 aiert->devInst = _devInst;
249 TRY_XAIE_API_FATAL_ERROR(XAie_SetupPartitionConfig, &aiert->devInst,
250 XAIE_PARTITION_BASE_ADDR, partitionStartCol,
251 partitionNumCols);
252 TRY_XAIE_API_FATAL_ERROR(XAie_CfgInitialize, &aiert->devInst,
253 &aiert->configPtr);
254 TRY_XAIE_API_FATAL_ERROR(XAie_UpdateNpiAddr, &aiert->devInst, NPI_ADDR);
255}
256
258 bool xaieDebug) {
259 // Quoting: The instance of a device must be always declared using this
260 // macro. In the future, the same macro will be expanded to
261 // allocate more memory from the user application for resource
262 // management.
263 if (aieSim) {
264 TRY_XAIE_API_FATAL_ERROR(XAie_SetIOBackend, &aiert->devInst,
265 XAIE_IO_BACKEND_SIM);
266 } else if (xaieDebug)
267 TRY_XAIE_API_FATAL_ERROR(XAie_SetIOBackend, &aiert->devInst,
268 XAIE_IO_BACKEND_DEBUG);
269 else
270 TRY_XAIE_API_FATAL_ERROR(XAie_SetIOBackend, &aiert->devInst,
271 XAIE_IO_BACKEND_CDO);
272 return success();
273}
274
275LogicalResult configureLocksInBdBlock(const AIE::AIETargetModel &targetModel,
276 XAie_DmaDesc &dmaTileBd, Block &block,
277 int col, int row) {
278 LLVM_DEBUG(llvm::dbgs() << "\nstart configuring bds\n");
279 std::optional<int> acqValue, relValue, acqLockId, relLockId;
280 bool acqEn = false;
281
282 // switch (lock->getAc)
283 AIE::LockOp lock;
284 for (auto op : block.getOps<AIE::UseLockOp>()) {
285 // Only dyn_cast if you are going to check if it was of the type
286 // expected; if you aren't checking use cast instead as it will at
287 // least assert in debug mode with an easier to understand error than
288 // dereferencing.
289 lock = cast<AIE::LockOp>(op.getLock().getDefiningOp());
290 switch (op.getAction()) {
291 case AIE::LockAction::Acquire:
292 case AIE::LockAction::AcquireGreaterEqual:
293 acqEn = op.getAcqEn();
294 acqLockId = lock.getLockIDValue();
295 acqValue = op.getLockValue();
296 if (op.acquireGE())
297 acqValue.value() = -acqValue.value();
298 break;
299 case AIE::LockAction::Release:
300 relLockId = lock.getLockIDValue();
301 relValue = op.getLockValue();
302 break;
303 }
304 }
305
306 assert(acqValue && relValue && acqLockId && relLockId &&
307 "expected both use_lock(acquire) and use_lock(release) with bd");
308
309 if (targetModel.isMemTile(col, row)) {
310 auto lockOffset = targetModel.getLockLocalBaseIndex(
311 col, row, lock.colIndex(), lock.rowIndex());
312 if (lockOffset && acqLockId)
313 acqLockId.value() += lockOffset.value();
314 if (lockOffset && relLockId)
315 relLockId.value() += lockOffset.value();
316 }
317
318 // no RelEn in the arch spec even though the API requires you to set it?
319 bool relEn = false;
320 XAie_Lock acqLock = XAie_LockInit(acqLockId.value(), acqValue.value());
321 XAie_Lock relLock = XAie_LockInit(relLockId.value(), relValue.value());
322 TRY_XAIE_API_EMIT_ERROR((*block.getOps<AIE::UseLockOp>().begin()),
323 dmaTileBd.DmaMod->SetLock, &dmaTileBd, acqLock,
324 relLock, acqEn, relEn);
325 return success();
326}
327
328LogicalResult configureBdInBlock(const AIE::AIETargetModel &targetModel,
329 XAie_DevInst *devInst, XAie_DmaDesc &dmaTileBd,
330 Block &block, int col, int row, int bdId,
331 std::optional<int> nextBdId) {
332 std::optional<int> packetType;
333 std::optional<int> packetID;
334
335 // Below should go
336 auto maybePacketOps = block.getOps<AIE::DMABDPACKETOp>();
337 if (!maybePacketOps.empty()) {
338 assert(llvm::range_size(maybePacketOps) == 1 &&
339 "expected only one dma_bd_packet");
340 auto packetOp = *maybePacketOps.begin();
341 packetType = packetOp.getPacketType();
342 packetID = packetOp.getPacketID();
343 }
344
345 auto bdOp = *block.getOps<AIE::DMABDOp>().begin();
346
347 if (targetModel.isShimNOCTile(col, row)) {
348 // write them out like this so they show up with names in debug prints
349 uint8_t smid = 0;
350 uint32_t burstLen =
351 getShimBurstLengthBytes(targetModel, bdOp.getBurstLength());
352 uint8_t qOs = 0;
353 uint8_t cache = 0;
354 uint8_t secure = 0;
355 TRY_XAIE_API_EMIT_ERROR(bdOp, XAie_DmaSetAxi, &dmaTileBd, smid,
356 burstLen / 16, qOs, cache, secure);
357 }
358
359 // get address from BufferOp (core,mem) or ExternalBufferOp (shim)
360 uint64_t baseAddr = 0;
361 if (targetModel.isShimNOCTile(col, row)) {
362 auto bufferOp =
363 cast<AIE::ExternalBufferOp>(bdOp.getBuffer().getDefiningOp());
364 // external buffers aren't required to have an address here because the
365 // address might get patched later or the default of zero might be a valid
366 // address.
367 if (bufferOp.getAddress())
368 baseAddr = bufferOp.getAddress().value();
369 } else {
370 auto bufferOp = cast<AIE::BufferOp>(bdOp.getBuffer().getDefiningOp());
371 if (!bufferOp.getAddress())
372 return bufferOp.emitError("buffer must have address assigned");
373 baseAddr = bufferOp.getAddress().value();
374 }
375
376 if (targetModel.isMemTile(col, row)) {
377 // check if buffer is allocated on the same memtile, the west, or the east
378 // one
379 auto bufferOp = cast<AIE::BufferOp>(bdOp.getBuffer().getDefiningOp());
380 auto bufferRow = bufferOp.getTileOp().getRow();
381 auto bufferCol = bufferOp.getTileOp().getCol();
382 auto addrOffset =
383 targetModel.getMemLocalBaseAddress(col, row, bufferCol, bufferRow);
384 if (addrOffset)
385 baseAddr += addrOffset.value();
386 }
387
388 std::optional<llvm::ArrayRef<AIE::BDDimLayoutAttr>> dims =
389 bdOp.getDimensions();
390 uint64_t lenInBytes = bdOp.getLenInBytes();
391 uint64_t basePlusOffsetInBytes = baseAddr + bdOp.getOffsetInBytes();
392 if (!dims) {
393 TRY_XAIE_API_EMIT_ERROR(bdOp, XAie_DmaSetAddrLen, &dmaTileBd,
394 basePlusOffsetInBytes, lenInBytes);
395 } else {
396 XAie_DmaTensor dmaTileBdTensor = {};
397 dmaTileBdTensor.NumDim = dims->size();
398 dmaTileBdTensor.Dim = static_cast<XAie_DmaDimDesc *>(
399 calloc(dmaTileBdTensor.NumDim, sizeof(XAie_DmaDimDesc)));
400 if (!dmaTileBdTensor.Dim)
401 return bdOp.emitError("couldn't allocate array of XAie_DmaDimDesc");
402 // libxaie requires stride in multiples of 32b
403 double elementWidthIn32bWords =
404 static_cast<double>(bdOp.getBufferElementTypeWidthInBytes()) / 4.0;
405 for (size_t i = 0; i < dims->size(); i++) {
406 // Pass down dimensions in reverse order; in the MLIR, this allows
407 // us to specify step sizes/wraps in the same order as we would
408 // access a multi-dim C array, with the highest dimension first.
409 int j = dims->size() - i - 1;
410 uint16_t size;
411 uint32_t stride;
412 if (j > 0) {
413 stride = static_cast<uint32_t>(dims.value()[i].getStride() *
414 elementWidthIn32bWords);
415 size = dims.value()[i].getSize();
416 } else {
417 stride = dims.value()[i].getStride();
418 size = static_cast<uint16_t>(dims.value()[i].getSize() *
419 elementWidthIn32bWords);
420 }
421 stride = stride > 0 ? stride : 1;
422 // Assume AIE-ML architecture (ie use AieMlDimDesc instead of AieDimDesc);
423 // asserted in AIETranslateToCDODirect).
424 dmaTileBdTensor.Dim[j].AieMlDimDesc = {stride, size};
425 }
426 TRY_XAIE_API_EMIT_ERROR(bdOp, XAie_DmaSetMultiDimAddr, &dmaTileBd,
427 &dmaTileBdTensor, basePlusOffsetInBytes,
428 lenInBytes);
429 }
430
431 // ND zero padding.
432 std::optional<llvm::ArrayRef<AIE::BDPadLayoutAttr>> padDims =
433 bdOp.getPadDimensions();
434
435 if (padDims) {
436 XAie_DmaPadTensor dmaPadTensor = {};
437 dmaPadTensor.NumDim = padDims->size();
438 dmaPadTensor.PadDesc = static_cast<XAie_PadDesc *>(
439 calloc(dmaPadTensor.NumDim, sizeof(XAie_PadDesc)));
440 if (!dmaPadTensor.PadDesc)
441 return bdOp.emitError("couldn't allocate array of XAie_PadDesc");
442 // libxaie requires stride in multiples of 32b
443 double elementWidthIn32bWords =
444 static_cast<double>(bdOp.getBufferElementTypeWidthInBytes()) / 4.0;
445 for (size_t i = 0; i < padDims->size(); i++) {
446 // Pass down dimensions in reverse order.
447 int j = padDims->size() - i - 1;
448 uint8_t before;
449 uint8_t after;
450 if (j > 0) {
451 before = static_cast<uint8_t>(padDims.value()[i].getConstPadBefore());
452 after = static_cast<uint8_t>(padDims.value()[i].getConstPadAfter());
453 } else {
454 before = static_cast<uint8_t>(padDims.value()[i].getConstPadBefore() *
455 elementWidthIn32bWords);
456 after = static_cast<uint8_t>(padDims.value()[i].getConstPadAfter() *
457 elementWidthIn32bWords);
458 }
459 dmaPadTensor.PadDesc[j] = {before, after};
460 }
461 TRY_XAIE_API_EMIT_ERROR(bdOp, XAie_DmaSetPadding, &dmaTileBd,
462 &dmaPadTensor);
463 }
464 if (nextBdId) {
465 auto enableNextBd = 1;
466 TRY_XAIE_API_EMIT_ERROR(bdOp, XAie_DmaSetNextBd, &dmaTileBd,
467 nextBdId.value(), enableNextBd);
468 }
469
470 if (auto packetInfo = bdOp.getPacket()) {
471 packetType = packetInfo->getPktType();
472 packetID = packetInfo->getPktId();
473 }
474
475 if (packetID) {
476 if (!packetType)
477 bdOp.emitError("must have packetType with packetID");
478 if (bdOp.getLen() == 0)
479 return bdOp.emitOpError(
480 "For MM2S channels, if Buffer_Length=0 then Enable_Packet must be "
481 "set to 0, otherwise behavior is undefined (3.7.8 arch spec)");
483 bdOp, XAie_DmaSetPkt, &dmaTileBd,
484 XAie_PacketInit(packetID.value(), packetType.value()));
485 }
486 TRY_XAIE_API_EMIT_ERROR(bdOp, XAie_DmaEnableBd, &dmaTileBd);
487 auto tileLoc = XAie_TileLoc(col, row);
488 TRY_XAIE_API_EMIT_ERROR(bdOp, XAie_DmaWriteBd, devInst, &dmaTileBd, tileLoc,
489 bdId);
490 LLVM_DEBUG(llvm::dbgs() << "\nend configuring bds\n");
491 return success();
492};
493
495 Operation &op, int col, int row, int chNum, const DMAChannelDir &channelDir,
496 int bdId, int repeatCount) {
497 XAie_DmaDirection direction =
498 channelDir == DMAChannelDir::S2MM ? DMA_S2MM : DMA_MM2S;
499 auto tileLoc = XAie_TileLoc(col, row);
500 auto enTokenIssue = tileLoc.Row == 0 && direction == DMA_S2MM;
501 // in english repeat_count==0 means "do it once" and don't repeat but
502 // libxaie treats repeat_count=1 as do it once.
503 repeatCount += 1;
504 TRY_XAIE_API_EMIT_ERROR(op, XAie_DmaChannelSetStartQueue, &aiert->devInst,
505 tileLoc, chNum, direction, bdId, repeatCount,
506 enTokenIssue);
507 TRY_XAIE_API_EMIT_ERROR(op, XAie_DmaChannelEnable, &aiert->devInst, tileLoc,
508 chNum, direction);
509 return success();
510};
511
513 int col, int row) {
514 DMABDOp bd = *block.getOps<DMABDOp>().begin();
515 assert(bd.getBdId().has_value() &&
516 "DMABDOp must have assigned bd_id; did you forget to run "
517 "aie-assign-bd-ids?");
518 XAie_DmaDesc dmaTileBd;
519 auto tileLoc = XAie_TileLoc(col, row);
520 TRY_XAIE_API_EMIT_ERROR(bd, XAie_DmaDescInit, &aiert->devInst, &dmaTileBd,
521 tileLoc);
522 if (!block.getOps<UseLockOp>().empty() &&
523 failed(configureLocksInBdBlock(targetModel, dmaTileBd, block, col, row)))
524 return failure();
525 if (!block.getOps<DMABDOp>().empty() &&
526 failed(configureBdInBlock(targetModel, &aiert->devInst, dmaTileBd, block,
527 col, row, bd.getBdId().value(),
528 bd.getNextBdId())))
529 return failure();
530 return success();
531}
532
533LogicalResult xilinx::AIE::AIERTControl::initLocks(DeviceOp &targetOp) {
534 for (auto tileOp : targetOp.getOps<TileOp>()) {
535 auto tileLoc = XAie_TileLoc(tileOp.colIndex(), tileOp.rowIndex());
536 if (!tileOp.isShimTile() && tileOp.getCoreOp()) {
537 TRY_XAIE_API_EMIT_ERROR(tileOp, XAie_CoreReset, &aiert->devInst, tileLoc);
538 TRY_XAIE_API_EMIT_ERROR(tileOp, XAie_CoreUnreset, &aiert->devInst,
539 tileLoc);
540 // Set locks to zero
541 for (uint8_t l = 0; l < NUM_LOCKS; l++) {
542 auto locInit = XAie_LockInit(l, 0);
543 TRY_XAIE_API_EMIT_ERROR(tileOp, XAie_LockSetValue, &aiert->devInst,
544 tileLoc, locInit);
545 }
546 }
547 }
548
549 // Set locks with explicit initializers
550 targetOp.walk<WalkOrder::PreOrder>([&](LockOp lockOp) {
551 if (lockOp.getLockID() && lockOp.getInit()) {
552 auto tileLoc = XAie_TileLoc(lockOp.getTileOp().colIndex(),
553 lockOp.getTileOp().rowIndex());
554 auto locInit = XAie_LockInit(*lockOp.getLockID(), *lockOp.getInit());
555 TRY_XAIE_API_FATAL_ERROR(XAie_LockSetValue, &aiert->devInst, tileLoc,
556 locInit);
557 } else
558 LLVM_DEBUG(llvm::dbgs()
559 << "lock op missing either id or init" << lockOp << "\n");
560 });
561 return success();
562}
563
564LogicalResult xilinx::AIE::AIERTControl::initBuffers(DeviceOp &targetOp) {
565 // Set buffers with explicit initializers
566 targetOp.walk<WalkOrder::PreOrder>([&](BufferOp bufferOp) {
567 auto initialValue = bufferOp.getInitialValue();
568 if (!initialValue)
569 return;
570 mlir::DenseElementsAttr denseInit =
571 dyn_cast<mlir::DenseElementsAttr>(initialValue.value());
572 if (!denseInit)
573 return;
574 auto tileLoc = XAie_TileLoc(bufferOp.getTileOp().colIndex(),
575 bufferOp.getTileOp().rowIndex());
576 std::vector<char> byteVec;
577 if (denseInit.getElementType().isIntOrIndex()) {
578 for (auto intVal : denseInit.getValues<APInt>()) {
579 // Get the size in bytes
580 size_t byteSize = (intVal.getBitWidth() + 7) / 8;
581 // Create a buffer for the integer bytes and copy
582 std::vector<char> bytes(byteSize);
583 std::copy(
584 static_cast<const char *>(static_cast<const void *>(&intVal)),
585 static_cast<const char *>(static_cast<const void *>(&intVal)) +
586 byteSize,
587 bytes.begin());
588 byteVec.insert(byteVec.end(), bytes.begin(), bytes.end());
589 }
590 } else if (isa<FloatType>(denseInit.getElementType())) {
591 for (auto floatVal : denseInit.getValues<APFloat>()) {
592 APInt floatInt = floatVal.bitcastToAPInt();
593 // Get the size in bytes
594 size_t byteSize = (floatInt.getBitWidth() + 7) / 8;
595 // Create a buffer for the float bytes and copy
596 std::vector<char> bytes(byteSize);
597 std::copy(
598 static_cast<const char *>(static_cast<const void *>(&floatInt)),
599 static_cast<const char *>(static_cast<const void *>(&floatInt)) +
600 byteSize,
601 bytes.begin());
602 byteVec.insert(byteVec.end(), bytes.begin(), bytes.end());
603 }
604 } else {
605 llvm::outs() << "buffer op type not supported for initialization "
606 << bufferOp << "\n";
607 return;
608 }
609 TRY_XAIE_API_FATAL_ERROR(XAie_DataMemBlockWrite, &aiert->devInst, tileLoc,
610 bufferOp.getAddress().value(), byteVec.data(),
611 byteVec.size());
612 });
613 return success();
614}
615
616LogicalResult xilinx::AIE::AIERTControl::configureSwitches(DeviceOp &targetOp) {
617
618 // StreamSwitch (switchbox) configuration
619 for (auto switchboxOp : targetOp.getOps<SwitchboxOp>()) {
620 int32_t col = switchboxOp.colIndex();
621 int32_t row = switchboxOp.rowIndex();
622 XAie_LocType tileLoc = XAie_TileLoc(col, row);
623 assert(targetModel.hasProperty(AIETargetModel::IsNPU) &&
624 "Only NPU currently supported");
625
626 Block &b = switchboxOp.getConnections().front();
627 for (auto connectOp : b.getOps<ConnectOp>())
629 switchboxOp, XAie_StrmConnCctEnable, &aiert->devInst, tileLoc,
630 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(connectOp.getSourceBundle()),
631 connectOp.sourceIndex(),
632 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(connectOp.getDestBundle()),
633 connectOp.destIndex());
634
635 for (auto masterSetOp : b.getOps<MasterSetOp>()) {
636 int mask = 0;
637 int arbiter = -1;
638
639 for (auto val : masterSetOp.getAmsels()) {
640 AMSelOp amsel = cast<AMSelOp>(val.getDefiningOp());
641 arbiter = amsel.arbiterIndex();
642 int msel = amsel.getMselValue();
643 mask |= (1 << msel);
644 }
645
646 // the default is to keep header
647 bool keepHeader = true;
648 // the default for dma destinations is to drop the header
649 if (masterSetOp.getDestBundle() == WireBundle::DMA)
650 keepHeader = false;
651 // assume a connection going south from row zero gets wired to shimdma
652 // by a shimmux.
653 if (switchboxOp.rowIndex() == 0 &&
654 masterSetOp.getDestBundle() == WireBundle::South)
655 keepHeader = false;
656
657 // "keep_pkt_header" attribute overrides the above defaults, if set
658 if (auto keep = masterSetOp.getKeepPktHeader())
659 keepHeader = *keep;
660
661 auto dropHeader =
662 keepHeader ? XAIE_SS_PKT_DONOT_DROP_HEADER : XAIE_SS_PKT_DROP_HEADER;
664 masterSetOp, XAie_StrmPktSwMstrPortEnable, &aiert->devInst, tileLoc,
665 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(masterSetOp.getDestBundle()),
666 masterSetOp.destIndex(), dropHeader, arbiter, mask);
667 }
668
669 for (auto packetRulesOp : b.getOps<PacketRulesOp>()) {
670 int slot = 0;
671 Block &block = packetRulesOp.getRules().front();
672 for (auto slotOp : block.getOps<PacketRuleOp>()) {
673 AMSelOp amselOp = cast<AMSelOp>(slotOp.getAmsel().getDefiningOp());
674 int arbiter = amselOp.arbiterIndex();
675 int msel = amselOp.getMselValue();
676 TRY_XAIE_API_EMIT_ERROR(packetRulesOp, XAie_StrmPktSwSlavePortEnable,
677 &aiert->devInst, tileLoc,
678 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(
679 packetRulesOp.getSourceBundle()),
680 packetRulesOp.sourceIndex());
681 auto packetInit = XAie_PacketInit(slotOp.valueInt(), /*PktType*/ 0);
682 // TODO Need to better define packet id,type used here
683 TRY_XAIE_API_EMIT_ERROR(packetRulesOp, XAie_StrmPktSwSlaveSlotEnable,
684 &aiert->devInst, tileLoc,
685 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(
686 packetRulesOp.getSourceBundle()),
687 packetRulesOp.sourceIndex(), slot, packetInit,
688 slotOp.maskInt(), msel, arbiter);
689 slot++;
690 }
691 }
692 }
693
694 for (auto muxOp : targetOp.getOps<ShimMuxOp>()) {
695 // NOTE ShimMux always connects from the south as directions are
696 // defined relative to the tile stream switch.
697 auto tileLoc =
698 XAie_TileLoc(muxOp.getTileOp().getCol(), muxOp.getTileOp().getRow());
699 Block &b = muxOp.getConnections().front();
700 for (auto connectOp : b.getOps<ConnectOp>()) {
701 // demux!
702 if (connectOp.getSourceBundle() == WireBundle::North)
703 TRY_XAIE_API_EMIT_ERROR(muxOp, XAie_EnableAieToShimDmaStrmPort,
704 &aiert->devInst, tileLoc,
705 connectOp.sourceIndex());
706 // mux
707 if (connectOp.getDestBundle() == WireBundle::North)
708 TRY_XAIE_API_EMIT_ERROR(muxOp, XAie_EnableShimDmaToAieStrmPort,
709 &aiert->devInst, tileLoc,
710 connectOp.destIndex());
711 }
712 }
713
714 for (auto switchboxOp : targetOp.getOps<ShimSwitchboxOp>()) {
715 Block &b = switchboxOp.getConnections().front();
716 auto tileLoc = XAie_TileLoc(switchboxOp.getCol(), 0);
717 for (auto connectOp : b.getOps<ConnectOp>())
719 switchboxOp, XAie_StrmConnCctEnable, &aiert->devInst, tileLoc,
720 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(connectOp.getSourceBundle()),
721 connectOp.sourceIndex(),
722 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(connectOp.getDestBundle()),
723 connectOp.destIndex());
724 }
725
726 // Cascade configuration
727 if (isa<AIE2TargetModel>(targetModel)) {
728 for (auto configOp : targetOp.getOps<ConfigureCascadeOp>()) {
729 TileOp tile = cast<TileOp>(configOp.getTile().getDefiningOp());
730 auto tileLoc = XAie_TileLoc(tile.getCol(), tile.getRow());
732 targetOp, XAie_CoreConfigAccumulatorControl, &aiert->devInst, tileLoc,
733 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(
734 static_cast<WireBundle>(configOp.getInputDir())),
735 WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE.at(
736 static_cast<WireBundle>(configOp.getOutputDir())));
737 }
738 }
739
740 return success();
741}
742
743LogicalResult xilinx::AIE::AIERTControl::addInitConfig(DeviceOp &targetOp) {
744
745 if (failed(initLocks(targetOp))) {
746 return failure();
747 }
748
749 if (failed(initBuffers(targetOp))) {
750 return failure();
751 }
752
753 auto memOps = llvm::to_vector_of<TileElement>(targetOp.getOps<MemOp>());
754 llvm::append_range(memOps, targetOp.getOps<MemTileDMAOp>());
755 llvm::append_range(memOps, targetOp.getOps<ShimDMAOp>());
756 for (TileElement memOp : memOps) {
757 int col = memOp.getTileID().col;
758 int row = memOp.getTileID().row;
759
760 // Get the region's entry block, then start traversing through the chain of
761 // blocks.
762 llvm::SetVector<Block *> blockVector =
763 getOrderedChainOfBlocks(&memOp.getOperation()->getRegion(0));
764
765 // handle DMA ops separately
766 auto dmaOps = llvm::to_vector_of<DMAOp>(
767 memOp.getOperation()->getRegion(0).getOps<DMAOp>());
768 if (!dmaOps.empty()) {
769 for (auto dmaOp : dmaOps)
770 for (auto &bdRegion : dmaOp.getBds()) {
771 Block &block = bdRegion.getBlocks().front();
772 if (failed(configureLocksAndBd(block, col, row)))
773 return failure();
774 }
775 } else {
776 for (Block *block : blockVector) {
777 if (block->getOps<DMABDOp>().empty())
778 continue;
779 if (failed(configureLocksAndBd(*block, col, row)))
780 return failure();
781 }
782 }
783
784 if (!dmaOps.empty())
785 for (auto dmaOp : dmaOps) {
786 auto &block = dmaOp.getBds().front().getBlocks().front();
787 DMABDOp bd = *block.getOps<DMABDOp>().begin();
788 if (failed(pushToBdQueueAndEnable(
789 *dmaOp.getOperation(), col, row, dmaOp.getChannelIndex(),
790 dmaOp.getChannelDir(), bd.getBdId().value(),
791 dmaOp.getRepeatCount())))
792 return failure();
793 }
794 else
795 for (Block *block : blockVector) {
796 for (auto op : block->getOps<DMAStartOp>()) {
797 DMABDOp bd = *op.getDest()->getOps<DMABDOp>().begin();
798 int chNum = op.getChannelIndex();
799 auto channelDir = op.getChannelDir();
800 if (failed(pushToBdQueueAndEnable(*bd.getOperation(), col, row, chNum,
801 channelDir, bd.getBdId().value(),
802 op.getRepeatCount())))
803 return failure();
804 }
805 }
806 }
807
808 if (failed(configureSwitches(targetOp))) {
809 return failure();
810 }
811
812 return success();
813}
814
815LogicalResult xilinx::AIE::AIERTControl::addCoreEnable(DeviceOp &targetOp) {
816 // Start execution of all the cores.
817 for (auto tileOp : targetOp.getOps<TileOp>()) {
818 auto tileLoc = XAie_TileLoc(tileOp.colIndex(), tileOp.rowIndex());
819 if (!tileOp.isShimTile() && tileOp.getCoreOp())
820 TRY_XAIE_API_EMIT_ERROR(targetOp, XAie_CoreEnable, &aiert->devInst,
821 tileLoc);
822 }
823 return success();
824}
825
826LogicalResult xilinx::AIE::AIERTControl::addAieElf(uint8_t col, uint8_t row,
827 const StringRef elfPath,
828 bool aieSim) {
829 TRY_XAIE_API_LOGICAL_RESULT(XAie_CoreDisable, &aiert->devInst,
830 XAie_TileLoc(col, row));
831 TRY_XAIE_API_LOGICAL_RESULT(XAie_DmaChannelResetAll, &aiert->devInst,
832 XAie_TileLoc(col, row),
833 XAie_DmaChReset::DMA_CHANNEL_RESET);
834
835 // loadSym: Load symbols from .map file. This argument is not used when
836 // __AIESIM__ is not defined.
837 TRY_XAIE_API_LOGICAL_RESULT(XAie_LoadElf, &aiert->devInst,
838 XAie_TileLoc(col, row), elfPath.str().c_str(),
839 /*loadSym*/ aieSim);
840
841 TRY_XAIE_API_LOGICAL_RESULT(XAie_DmaChannelResetAll, &aiert->devInst,
842 XAie_TileLoc(col, row),
843 XAie_DmaChReset::DMA_CHANNEL_UNRESET);
844
845 return success();
846}
847
849 TRY_XAIE_API_LOGICAL_RESULT(XAie_ResetPartition, &aiert->devInst);
850 return success();
851}
852
853LogicalResult xilinx::AIE::AIERTControl::resetDMA(int col, int row, bool on) {
854 auto tileLoc = XAie_TileLoc(col, row);
855 XAie_DmaDesc dmaTileBd;
856 TRY_XAIE_API_LOGICAL_RESULT(XAie_DmaDescInit, &aiert->devInst, &dmaTileBd,
857 tileLoc);
858 TRY_XAIE_API_LOGICAL_RESULT(XAie_DmaDisableBd, &dmaTileBd);
859 TRY_XAIE_API_LOGICAL_RESULT(XAie_DmaChannelResetAll, &aiert->devInst, tileLoc,
860 on ? XAie_DmaChReset::DMA_CHANNEL_UNRESET
861 : XAie_DmaChReset::DMA_CHANNEL_RESET);
862 return success();
863}
864
866 auto tileLoc = XAie_TileLoc(col, row);
867 TRY_XAIE_API_LOGICAL_RESULT(XAie_CoreReset, &aiert->devInst, tileLoc);
868 return success();
869}
870
872 XAie_LocType tileLoc = XAie_TileLoc(col, row);
873
874 // Reset all combinations of input/output routing in the switchbox
875 for (auto endpoint_a : WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE) {
876 for (auto endpoint_b : WIRE_BUNDLE_TO_STRM_SW_PORT_TYPE) {
877 unsigned n_a_connections = targetModel.getNumSourceSwitchboxConnections(
878 col, row, endpoint_a.first);
879 unsigned n_b_connections = targetModel.getNumDestSwitchboxConnections(
880 col, row, endpoint_b.first);
881 for (unsigned a_index = 0; a_index < n_a_connections; a_index++) {
882 for (unsigned b_index = 0; b_index < n_b_connections; b_index++) {
883 if (!targetModel.isLegalTileConnection(col, row, endpoint_a.first,
884 a_index, endpoint_b.first,
885 b_index)) {
886 continue;
887 }
888 TRY_XAIE_API_FATAL_ERROR(XAie_StrmConnCctDisable, &aiert->devInst,
889 tileLoc, endpoint_a.second, a_index,
890 endpoint_b.second, b_index);
891 }
892 }
893 }
894 }
895
896 return success();
897}
898
900 auto tileLoc = XAie_TileLoc(col, row);
901 TRY_XAIE_API_LOGICAL_RESULT(XAie_CoreUnreset, &aiert->devInst, tileLoc);
902 return success();
903}
904
906 int lockId) {
907 auto tileLoc = XAie_TileLoc(col, row);
908 // Reset a single lock to value 0
909 XAie_Lock lock;
910 lock.LockId = lockId;
911 lock.LockVal = 0;
912 TRY_XAIE_API_LOGICAL_RESULT(XAie_LockSetValue, &aiert->devInst, tileLoc,
913 lock);
914 return success();
915}
916
918 int col, int row, WireBundle sourceBundle, int sourceChannel,
919 WireBundle destBundle, int destChannel) {
920 auto tileLoc = XAie_TileLoc(col, row);
921
922 // Helper lambda to map WireBundle to StrmSwPortType
923 auto mapBundle = [](WireBundle bundle) -> StrmSwPortType {
924 switch (bundle) {
925 case WireBundle::Core:
926 return CORE;
927 case WireBundle::DMA:
928 return DMA;
929 case WireBundle::FIFO:
930 return FIFO;
931 case WireBundle::South:
932 return SOUTH;
933 case WireBundle::West:
934 return WEST;
935 case WireBundle::North:
936 return NORTH;
937 case WireBundle::East:
938 return EAST;
939 case WireBundle::Trace:
940 return TRACE;
941 default:
942 return SOUTH;
943 }
944 };
945
946 StrmSwPortType sourcePortType = mapBundle(sourceBundle);
947 StrmSwPortType destPortType = mapBundle(destBundle);
948
949 // Disconnect the specific connection from source to destination
950 TRY_XAIE_API_LOGICAL_RESULT(XAie_StrmConnCctDisable, &aiert->devInst, tileLoc,
951 sourcePortType, sourceChannel, destPortType,
952 destChannel);
953
954 return success();
955}
956
958 auto tileLoc = XAie_TileLoc(col, row);
959 // Reset performance counters in all modules
960 // Try core module counters (if applicable)
961 for (int counterId = 0; counterId < 4; counterId++) {
962 // Ignore errors as not all tiles have all counter types
963 (void)XAie_PerfCounterReset(&aiert->devInst, tileLoc, XAIE_CORE_MOD,
964 counterId);
965 }
966 // Try mem module counters
967 for (int counterId = 0; counterId < 4; counterId++) {
968 (void)XAie_PerfCounterReset(&aiert->devInst, tileLoc, XAIE_MEM_MOD,
969 counterId);
970 }
971 // Try PL module counters (for shim tiles)
972 for (int counterId = 0; counterId < 4; counterId++) {
973 (void)XAie_PerfCounterReset(&aiert->devInst, tileLoc, XAIE_PL_MOD,
974 counterId);
975 }
976 return success();
977}
978
979LogicalResult xilinx::AIE::AIERTControl::addAieElfs(DeviceOp &targetOp,
980 const StringRef elfPath,
981 bool aieSim) {
982 for (auto tileOp : targetOp.getOps<TileOp>())
983 if (tileOp.isShimNOCorPLTile()) {
984 // Resets no needed with V2 kernel driver
985 } else {
986 int col = tileOp.colIndex();
987 int row = tileOp.rowIndex();
988 if (auto coreOp = tileOp.getCoreOp()) {
989 std::string fileName;
990 if (auto fileAttr = coreOp.getElfFile()) {
991 fileName = fileAttr->str();
992 } else {
993 // Skip cores without elf_file (e.g., lightweight reset
994 // devices that only need DMA/lock reconfiguration).
995 continue;
996 }
997 // Check if fileName is already an absolute path.
998 // If so, use it directly. Otherwise, concatenate with elfPath.
999 std::string fullPath;
1000 if (std::filesystem::path(fileName).is_absolute()) {
1001 fullPath = fileName;
1002 } else {
1003 auto ps = std::filesystem::path::preferred_separator;
1004 fullPath =
1005 (llvm::Twine(elfPath) + std::string(1, ps) + fileName).str();
1006 }
1007 if (failed(addAieElf(col, row, fullPath, aieSim)))
1008 return failure();
1009 }
1010 }
1011 return success();
1012}
1013
1015 size_t bdId) {
1016 auto tileLoc = XAie_TileLoc(col, row);
1017 TRY_XAIE_API_FATAL_ERROR(XAie_DmaUpdateBdAddr, &aiert->devInst, tileLoc, addr,
1018 bdId);
1019}
1020
1022 TRY_XAIE_API_FATAL_ERROR(XAie_StartTransaction, &aiert->devInst,
1023 XAIE_TRANSACTION_DISABLE_AUTO_FLUSH);
1024}
1025
1027 // Export the transactions to a binary buffer
1028 uint8_t *txn_ptr = XAie_ExportSerializedTransaction(&aiert->devInst, 0, 0);
1029 XAie_TxnHeader *hdr = (XAie_TxnHeader *)txn_ptr;
1030 std::vector<uint8_t> txn_data(txn_ptr, txn_ptr + hdr->TxnSize);
1031 return txn_data;
1032}
#define XAIE_SHIM_ROW
Definition AIERT.cpp:190
llvm::raw_ostream & showAIEXRTArgs(llvm::raw_ostream &out, const char *label, H1 &&value)
Definition AIERT.cpp:84
LogicalResult configureLocksInBdBlock(const AIE::AIETargetModel &targetModel, XAie_DmaDesc &dmaTileBd, Block &block, int col, int row)
Definition AIERT.cpp:275
#define XAIE_PARTITION_BASE_ADDR
Definition AIERT.cpp:192
#define TRY_XAIE_API_LOGICAL_RESULT(API,...)
Definition AIERT.cpp:129
#define NPI_ADDR
Definition AIERT.cpp:194
#define NUM_LOCKS
Definition AIERT.cpp:195
#define TRY_XAIE_API_FATAL_ERROR(API,...)
Definition AIERT.cpp:110
llvm::raw_ostream & operator<<(llvm::raw_ostream &os, const XAie_LocType &loc)
Definition AIERT.cpp:170
#define XAIE_BASE_ADDR
Definition AIERT.cpp:189
#define TRY_XAIE_API_EMIT_ERROR(OP, API,...)
Definition AIERT.cpp:120
#define XAIE_MEM_TILE_ROW_START
Definition AIERT.cpp:191
#define AIERC_STR(x)
Definition AIERT.cpp:34
LogicalResult configureBdInBlock(const AIE::AIETargetModel &targetModel, XAie_DevInst *devInst, XAie_DmaDesc &dmaTileBd, Block &block, int col, int row, int bdId, std::optional< int > nextBdId)
Definition AIERT.cpp:328
std::optional< uint32_t > getMemLocalBaseAddress(int localCol, int localRow, int memCol, int memRow) const
Return the memory base address (or offset) in the local tile when accessing a neighbor's memory or an...
virtual AIEArch getTargetArch() const =0
Return the target architecture.
bool isMemTile(int col, int row) const
Return true if the given tile is a Mem tile.
std::optional< uint32_t > getLockLocalBaseIndex(int localCol, int localRow, int lockCol, int lockRow) const
Return the lock base index (or offset) in the local tile when accessing a neighbor's lock or an empty...
virtual int rows() const =0
Return the number of rows in the device.
bool isShimNOCTile(int col, int row) const
Return true if the given tile is a ShimNOC tile.
virtual uint32_t getColumnShift() const =0
bool hasProperty(ModelProperty Prop) const
virtual int columns() const =0
Return the number of columns in the device.
virtual uint32_t getNumMemTileRows() const =0
virtual uint32_t getRowShift() const =0
uint32_t getShimBurstLengthBytes(const AIE::AIETargetModel &tm, uint32_t burstLength)
llvm::SetVector< mlir::Block * > getOrderedChainOfBlocks(mlir::Region *region)
mlir::LogicalResult resetPerfCounters(int col, int row)
Definition AIERT.cpp:957
mlir::LogicalResult resetCore(int col, int row)
Definition AIERT.cpp:865
mlir::LogicalResult pushToBdQueueAndEnable(mlir::Operation &op, int col, int row, int chNum, const DMAChannelDir &channelDir, int bdId, int repeatCount)
Definition AIERT.cpp:494
void dmaUpdateBdAddr(int col, int row, size_t addr, size_t bdId)
Definition AIERT.cpp:1014
mlir::LogicalResult resetPartition()
Definition AIERT.cpp:848
mlir::LogicalResult addAieElfs(DeviceOp &targetOp, const mlir::StringRef workDirPath, bool aieSim)
Definition AIERT.cpp:979
mlir::LogicalResult resetCoreUnreset(int col, int row)
Definition AIERT.cpp:899
mlir::LogicalResult setIOBackend(bool aieSim, bool xaieDebug)
Definition AIERT.cpp:257
std::vector< uint8_t > exportSerializedTransaction()
Definition AIERT.cpp:1026
AIERTControl(const xilinx::AIE::AIETargetModel &tm)
Definition AIERT.cpp:206
mlir::LogicalResult initLocks(DeviceOp &targetOp)
Definition AIERT.cpp:533
mlir::LogicalResult resetDMA(int col, int row, bool on)
Definition AIERT.cpp:853
mlir::LogicalResult configureSwitches(DeviceOp &targetOp)
Definition AIERT.cpp:616
mlir::LogicalResult initBuffers(DeviceOp &targetOp)
Definition AIERT.cpp:564
mlir::LogicalResult resetSwitch(int col, int row)
Definition AIERT.cpp:871
mlir::LogicalResult addCoreEnable(DeviceOp &targetOp)
Definition AIERT.cpp:815
mlir::LogicalResult resetSwitchConnection(int col, int row, WireBundle sourceBundle, int sourceChannel, WireBundle destBundle, int destChannel)
Definition AIERT.cpp:917
mlir::LogicalResult addAieElf(uint8_t col, uint8_t row, const mlir::StringRef elfPath, bool aieSim)
Definition AIERT.cpp:826
mlir::LogicalResult configureLocksAndBd(mlir::Block &block, int col, int row)
Definition AIERT.cpp:512
mlir::LogicalResult addInitConfig(DeviceOp &targetOp)
Definition AIERT.cpp:743
mlir::LogicalResult resetLock(int col, int row, int lockId)
Definition AIERT.cpp:905