Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright(c) 2021 HiSilicon Limited
3 : : * Copyright(c) 2021 Intel Corporation
4 : : * Copyright(c) 2021 Marvell International Ltd
5 : : * Copyright(c) 2021 SmartShare Systems
6 : : */
7 : :
8 : : #ifndef RTE_DMADEV_H
9 : : #define RTE_DMADEV_H
10 : :
11 : : /**
12 : : * @file rte_dmadev.h
13 : : *
14 : : * DMA (Direct Memory Access) device API.
15 : : *
16 : : * The DMA framework is built on the following model:
17 : : *
18 : : * --------------- --------------- ---------------
19 : : * | virtual DMA | | virtual DMA | | virtual DMA |
20 : : * | channel | | channel | | channel |
21 : : * --------------- --------------- ---------------
22 : : * | | |
23 : : * ------------------ |
24 : : * | |
25 : : * ------------ ------------
26 : : * | dmadev | | dmadev |
27 : : * ------------ ------------
28 : : * | |
29 : : * ------------------ ------------------
30 : : * | HW DMA channel | | HW DMA channel |
31 : : * ------------------ ------------------
32 : : * | |
33 : : * --------------------------------
34 : : * |
35 : : * ---------------------
36 : : * | HW DMA Controller |
37 : : * ---------------------
38 : : *
39 : : * The DMA controller may have multiple HW-DMA-channels (aka. HW-DMA-queues);
40 : : * each HW-DMA-channel is represented by a dmadev.
41 : : *
42 : : * A dmadev can create multiple virtual DMA channels, where each virtual DMA
43 : : * channel represents a different transfer context. DMA operation requests
44 : : * must be submitted to a virtual DMA channel. e.g. an application could create
45 : : * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
46 : : * virtual DMA channel 1 for the memory-to-device transfer scenario.
47 : : *
48 : : * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
49 : : * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
50 : : *
51 : : * The functions exported by the dmadev API to setup a device designated by its
52 : : * device identifier must be invoked in the following order:
53 : : * - rte_dma_configure()
54 : : * - rte_dma_vchan_setup()
55 : : * - rte_dma_start()
56 : : *
57 : : * Then, the application can invoke dataplane functions to process jobs.
58 : : *
59 : : * If the application wants to change the configuration (i.e. invoke
60 : : * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
61 : : * rte_dma_stop() first to stop the device and then do the reconfiguration
62 : : * before invoking rte_dma_start() again. The dataplane functions should not
63 : : * be invoked when the device is stopped.
64 : : *
65 : : * Finally, an application can close a dmadev by invoking the rte_dma_close()
66 : : * function.
67 : : *
68 : : * The dataplane APIs include two parts:
69 : : * The first part is the submission of operation requests:
70 : : * - rte_dma_copy()
71 : : * - rte_dma_copy_sg()
72 : : * - rte_dma_fill()
73 : : * - rte_dma_submit()
74 : : *
75 : : * These APIs could work with different virtual DMA channels which have
76 : : * different contexts.
77 : : *
78 : : * The first three APIs are used to submit operation requests to a virtual
79 : : * DMA channel. If the submission is successful, a non-negative
80 : : * ring_idx <= UINT16_MAX is returned; otherwise a negative number is returned.
81 : : *
82 : : * The last API is used to issue the doorbell to hardware; alternatively, the
83 : : * flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can do
84 : : * the same work.
85 : : * @note When enqueuing a set of jobs to the device, having a separate submit
86 : : * outside a loop makes for clearer code than having a check for the last
87 : : * iteration inside the loop to set a special submit flag. However, for cases
88 : : * where one item alone is to be submitted or there is a small set of jobs to
89 : : * be submitted sequentially, having a submit flag provides a lower-overhead
90 : : * way of doing the submission while still keeping the code clean.
91 : : *
92 : : * The second part is to obtain the result of requests:
93 : : * - rte_dma_completed()
94 : : * - return the number of operation requests completed successfully.
95 : : * - rte_dma_completed_status()
96 : : * - return the number of operation requests completed.
97 : : *
98 : : * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
99 : : * the application does not invoke the above two completion APIs.
100 : : *
101 : : * About the ring_idx which enqueue APIs (e.g. rte_dma_copy(), rte_dma_fill())
102 : : * return, the rules are as follows:
103 : : * - The ring_idx values for each virtual DMA channel are independent.
104 : : * - For a virtual DMA channel, the ring_idx is monotonically incremented;
105 : : * when it reaches UINT16_MAX, it wraps back to zero.
106 : : * - This ring_idx can be used by applications to track per-operation
107 : : * metadata in an application-defined circular ring.
108 : : * - The initial ring_idx of a virtual DMA channel is zero; after the
109 : : * device is stopped, the ring_idx is reset to zero.
110 : : *
111 : : * One example:
112 : : * - step-1: start one dmadev
113 : : * - step-2: enqueue a copy operation, the ring_idx returned is 0
114 : : * - step-3: enqueue a copy operation again, the ring_idx returned is 1
115 : : * - ...
116 : : * - step-101: stop the dmadev
117 : : * - step-102: start the dmadev
118 : : * - step-103: enqueue a copy operation, the ring_idx returned is 0
119 : : * - ...
120 : : * - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
121 : : * - step-x+1: enqueue a copy operation, the ring_idx returned is 0
122 : : * - ...
123 : : *
124 : : * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(),
125 : : * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
126 : : *
127 : : * The dmadev supports two types of address: memory address and device address.
128 : : *
129 : : * - memory address: the source and destination address of the memory-to-memory
130 : : * transfer type, or the source address of the memory-to-device transfer type,
131 : : * or the destination address of the device-to-memory transfer type.
132 : : * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory address
133 : : * can be any VA address; otherwise it must be an IOVA address.
134 : : *
135 : : * - device address: the source and destination address of the device-to-device
136 : : * transfer type, or the source address of the device-to-memory transfer type,
137 : : * or the destination address of the memory-to-device transfer type.
138 : : *
139 : : * Regarding MT-safety: all functions of the dmadev API implemented by a PMD are
140 : : * lock-free functions, which assume they are not invoked in parallel on
141 : : * different logical cores operating on the same target dmadev object.
142 : : * @note Different virtual DMA channels on the same dmadev *DO NOT* support
143 : : * parallel invocation because these virtual DMA channels share the same
144 : : * HW-DMA-channel.
145 : : */
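/*
 * Illustrative sketch of the flow described above, using the APIs declared
 * later in this file. It assumes dmadev dev_id exists and supports
 * memory-to-memory copies, that src_iova/dst_iova are valid DMA addresses
 * obtained by the application (plain VAs are acceptable only with SVA), and
 * that nb_desc = 1024 lies within the device's [min_desc, max_desc] range.
 */
static inline int
dmadev_mem_copy_example(int16_t dev_id, rte_iova_t src_iova, rte_iova_t dst_iova, uint32_t len)
{
	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};
	uint16_t last_idx;
	bool has_error;
	int ret;

	/* Setup order mandated above: configure -> vchan_setup -> start. */
	ret = rte_dma_configure(dev_id, &dev_conf);
	if (ret < 0)
		return ret;
	ret = rte_dma_vchan_setup(dev_id, 0, &vchan_conf);
	if (ret < 0)
		return ret;
	ret = rte_dma_start(dev_id);
	if (ret < 0)
		return ret;

	/* Enqueue one copy and ring the doorbell in the same call. */
	ret = rte_dma_copy(dev_id, 0, src_iova, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret;

	/* Poll until the operation is reported as completed. */
	while (rte_dma_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
		;
	if (has_error)
		return -EIO;

	rte_dma_stop(dev_id);
	rte_dma_close(dev_id);
	return 0;
}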
146 : :
147 : : #include <stdint.h>
148 : : #include <errno.h>
149 : :
150 : : #include <rte_bitops.h>
151 : : #include <rte_common.h>
152 : : #include <rte_uuid.h>
153 : :
154 : : #ifdef __cplusplus
155 : : extern "C" {
156 : : #endif
157 : :
158 : : /** Maximum number of devices if rte_dma_dev_max() is not called. */
159 : : #define RTE_DMADEV_DEFAULT_MAX 64
160 : :
161 : : /**
162 : : * Configure the maximum number of dmadevs.
163 : : * @note This function can be invoked in the primary process before rte_eal_init()
164 : : * to change the maximum number of dmadevs. If not invoked, the maximum number
165 : : * of dmadevs is RTE_DMADEV_DEFAULT_MAX.
166 : : *
167 : : * @param dev_max
168 : : * maximum number of dmadevs.
169 : : *
170 : : * @return
171 : : * 0 on success. Otherwise negative value is returned.
172 : : */
173 : : int rte_dma_dev_max(size_t dev_max);
174 : :
175 : : /**
176 : : * Get the device identifier for the named DMA device.
177 : : *
178 : : * @param name
179 : : * DMA device name.
180 : : *
181 : : * @return
182 : : * Returns DMA device identifier on success.
183 : : * - <0: Failure to find named DMA device.
184 : : */
185 : : int rte_dma_get_dev_id_by_name(const char *name);
186 : :
187 : : /**
188 : : * Check whether the dev_id is valid.
189 : : *
190 : : * @param dev_id
191 : : * DMA device index.
192 : : *
193 : : * @return
194 : : * - If the device index is valid (true) or not (false).
195 : : */
196 : : bool rte_dma_is_valid(int16_t dev_id);
197 : :
198 : : /**
199 : : * Get the total number of DMA devices that have been successfully
200 : : * initialised.
201 : : *
202 : : * @return
203 : : * The total number of usable DMA devices.
204 : : */
205 : : uint16_t rte_dma_count_avail(void);
206 : :
207 : : /**
208 : : * Iterates over valid dmadev instances.
209 : : *
210 : : * @param start_dev_id
211 : : * The device id from which to start the search.
212 : : * @return
213 : : * The next valid dmadev id, or -1 if there is none.
214 : : */
215 : : int16_t rte_dma_next_dev(int16_t start_dev_id);
216 : :
217 : : /** Utility macro to iterate over all available dmadevs */
218 : : #define RTE_DMA_FOREACH_DEV(p) \
219 : : for (p = rte_dma_next_dev(0); \
220 : : p != -1; \
221 : : p = rte_dma_next_dev(p + 1))
222 : :
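/*
 * Illustrative sketch (not part of the API declarations): enumerating devices
 * with the helpers above. The device name "dma0" is only an example, and
 * <stdio.h> is assumed to be included by the application for printf().
 */
static inline void
dmadev_enumerate_example(void)
{
	int16_t dev_id;
	int named_id;

	printf("%u dmadev(s) available\n", rte_dma_count_avail());

	/* Look up a device by name; a negative value means it was not found. */
	named_id = rte_dma_get_dev_id_by_name("dma0");
	if (named_id >= 0 && rte_dma_is_valid((int16_t)named_id))
		printf("dma0 has dev_id %d\n", named_id);

	/* Walk all valid devices. */
	RTE_DMA_FOREACH_DEV(dev_id)
		printf("found dmadev %d\n", dev_id);
}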
223 : :
224 : : /**@{@name DMA capability
225 : : * @see struct rte_dma_info::dev_capa
226 : : */
227 : : /** Support memory-to-memory transfer */
228 : : #define RTE_DMA_CAPA_MEM_TO_MEM RTE_BIT64(0)
229 : : /** Support memory-to-device transfer. */
230 : : #define RTE_DMA_CAPA_MEM_TO_DEV RTE_BIT64(1)
231 : : /** Support device-to-memory transfer. */
232 : : #define RTE_DMA_CAPA_DEV_TO_MEM RTE_BIT64(2)
233 : : /** Support device-to-device transfer. */
234 : : #define RTE_DMA_CAPA_DEV_TO_DEV RTE_BIT64(3)
235 : : /** Support SVA, which allows using a VA as the DMA address.
236 : : * If the device supports SVA, the application can pass any VA address, e.g. memory
237 : : * from rte_malloc(), rte_memzone(), malloc(), or stack memory.
238 : : * If the device does not support SVA, the application must pass an IOVA address,
239 : : * e.g. obtained from rte_malloc() or rte_memzone().
240 : : */
241 : : #define RTE_DMA_CAPA_SVA RTE_BIT64(4)
242 : : /** Support work in silent mode.
243 : : * In this mode, the application is not required to invoke the
244 : : * rte_dma_completed*() APIs.
245 : : * @see struct rte_dma_conf::silent_mode
246 : : */
247 : : #define RTE_DMA_CAPA_SILENT RTE_BIT64(5)
248 : : /** Supports error handling
249 : : *
250 : : * With this bit set, invalid input addresses will be reported as operation failures
251 : : * to the user but other operations can continue.
252 : : * Without this bit set, invalid data is not handled by either HW or driver, so user
253 : : * must ensure that all memory addresses are valid and accessible by HW.
254 : : */
255 : : #define RTE_DMA_CAPA_HANDLES_ERRORS RTE_BIT64(6)
256 : : /** Support automatic freeing of the source buffer once a memory-to-device transfer completes.
257 : : *
258 : : * @note Even though the DMA driver has this capability, it may not support all
259 : : * mempool drivers. If the mempool is not supported by the DMA driver,
260 : : * rte_dma_vchan_setup() will fail.
261 : : */
262 : : #define RTE_DMA_CAPA_M2D_AUTO_FREE RTE_BIT64(7)
263 : : /** Support strict priority scheduling.
264 : : *
265 : : * The application can assign a fixed priority to the DMA device using the
266 : : * 'priority' field in struct rte_dma_conf. The number of supported priority
267 : : * levels can be obtained from the 'nb_priorities' field in struct rte_dma_info.
268 : : */
269 : : #define RTE_DMA_CAPA_PRI_POLICY_SP RTE_BIT64(8)
270 : : /** Support inter-process DMA transfers.
271 : : *
272 : : * When this bit is set, the DMA device can perform memory transfers
273 : : * between different process memory spaces.
274 : : */
275 : : #define RTE_DMA_CAPA_INTER_PROCESS_DOMAIN RTE_BIT64(9)
276 : : /** Support inter-OS domain DMA transfers.
277 : : *
278 : : * The DMA device can perform memory transfers
279 : : * across different operating system domains.
280 : : */
281 : : #define RTE_DMA_CAPA_INTER_OS_DOMAIN RTE_BIT64(10)
282 : :
283 : : /** Support copy operation.
284 : : * The operation capabilities start at bit index 32, leaving a gap between the
285 : : * normal capabilities and the operation capabilities.
286 : : */
287 : : #define RTE_DMA_CAPA_OPS_COPY RTE_BIT64(32)
288 : : /** Support scatter-gather list copy operation. */
289 : : #define RTE_DMA_CAPA_OPS_COPY_SG RTE_BIT64(33)
290 : : /** Support fill operation. */
291 : : #define RTE_DMA_CAPA_OPS_FILL RTE_BIT64(34)
292 : : /** Support enqueue and dequeue operations. */
293 : : #define RTE_DMA_CAPA_OPS_ENQ_DEQ RTE_BIT64(35)
294 : : /**@}*/
295 : :
296 : : /** DMA device configuration flags.
297 : : * @see struct rte_dma_conf::flags
298 : : */
299 : : /** Operate in silent mode
300 : : * @see RTE_DMA_CAPA_SILENT
301 : : */
302 : : #define RTE_DMA_CFG_FLAG_SILENT RTE_BIT64(0)
303 : : /** Enable enqueue and dequeue operations
304 : : * @see RTE_DMA_CAPA_OPS_ENQ_DEQ
305 : : */
306 : : #define RTE_DMA_CFG_FLAG_ENQ_DEQ RTE_BIT64(1)
307 : :
308 : : /**
309 : : * A structure used to retrieve the information of a DMA device.
310 : : *
311 : : * @see rte_dma_info_get
312 : : */
313 : : struct rte_dma_info {
314 : : const char *dev_name; /**< Unique device name. */
315 : : /** Device capabilities (RTE_DMA_CAPA_*). */
316 : : uint64_t dev_capa;
317 : : /** Maximum number of virtual DMA channels supported. */
318 : : uint16_t max_vchans;
319 : : /** Maximum allowed number of virtual DMA channel descriptors. */
320 : : uint16_t max_desc;
321 : : /** Minimum allowed number of virtual DMA channel descriptors. */
322 : : uint16_t min_desc;
323 : : /** Maximum number of source or destination scatter-gather entries
324 : : * supported.
325 : : * If the device does not support COPY_SG capability, this value can be
326 : : * zero.
327 : : * If the device supports COPY_SG capability, then rte_dma_copy_sg()
328 : : * parameter nb_src/nb_dst should not exceed this value.
329 : : */
330 : : uint16_t max_sges;
331 : : /** NUMA node connection, -1 if unknown. */
332 : : int16_t numa_node;
333 : : /** Number of virtual DMA channel configured. */
334 : : uint16_t nb_vchans;
335 : : /** Number of priority levels (must be > 1) if priority scheduling is supported,
336 : : * 0 otherwise.
337 : : */
338 : : uint16_t nb_priorities;
339 : : };
340 : :
341 : : /**
342 : : * Retrieve information of a DMA device.
343 : : *
344 : : * @param dev_id
345 : : * The identifier of the device.
346 : : * @param[out] dev_info
347 : : * A pointer to a structure of type *rte_dma_info* to be filled with the
348 : : * information of the device.
349 : : *
350 : : * @return
351 : : * 0 on success. Otherwise negative value is returned.
352 : : */
353 : : int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
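/*
 * Illustrative sketch: querying device info and checking capability bits
 * before relying on optional features. This is an example, not part of the API.
 */
static inline int
dmadev_check_caps_example(int16_t dev_id)
{
	struct rte_dma_info info;
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	/* Plain memory-to-memory copy support is required by this example. */
	if (!(info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM) ||
	    !(info.dev_capa & RTE_DMA_CAPA_OPS_COPY))
		return -ENOTSUP;

	/* With SVA, plain virtual addresses may be used as DMA addresses. */
	if (info.dev_capa & RTE_DMA_CAPA_SVA) {
		/* e.g. VA pointers can be passed directly as rte_iova_t */
	}

	/* Scatter-gather copies must respect the per-transfer segment limit. */
	if (info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) {
		/* nb_src/nb_dst passed to rte_dma_copy_sg() must be <= info.max_sges */
	}

	return 0;
}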
354 : :
355 : : /**
356 : : * A structure used to configure a DMA device.
357 : : *
358 : : * @see rte_dma_configure
359 : : */
360 : : struct rte_dma_conf {
361 : : /** The number of virtual DMA channels to set up for the DMA device.
362 : : * This value cannot be greater than the field 'max_vchans' of struct
363 : : * rte_dma_info obtained from rte_dma_info_get().
364 : : */
365 : : uint16_t nb_vchans;
366 : : /** The priority of the DMA device.
367 : : * This value must be lower than the field 'nb_priorities' of struct
368 : : * rte_dma_info obtained from rte_dma_info_get(). If the DMA device
369 : : * does not support priority scheduling, this value should be zero.
370 : : *
371 : : * The lowest value indicates the highest priority, and vice-versa.
372 : : */
373 : : uint16_t priority;
374 : : /** DMA device configuration flags defined as RTE_DMA_CFG_FLAG_*. */
375 : : uint64_t flags;
376 : : };
377 : :
378 : : /**
379 : : * Configure a DMA device.
380 : : *
381 : : * This function must be invoked first before any other function in the
382 : : * API. This function can also be re-invoked when a device is in the
383 : : * stopped state.
384 : : *
385 : : * @param dev_id
386 : : * The identifier of the device to configure.
387 : : * @param dev_conf
388 : : * The DMA device configuration structure encapsulated into rte_dma_conf
389 : : * object.
390 : : *
391 : : * @return
392 : : * 0 on success. Otherwise negative value is returned.
393 : : */
394 : : int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
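/*
 * Illustrative sketch: building an rte_dma_conf and configuring a stopped
 * device. The priority value and the single-vchan setup are example choices,
 * and silent mode is only requested when the device advertises it.
 */
static inline int
dmadev_configure_example(int16_t dev_id, bool want_silent)
{
	struct rte_dma_info info;
	struct rte_dma_conf conf = { 0 };
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	conf.nb_vchans = 1; /* must not exceed info.max_vchans */

	/* Request the highest priority (0) when strict priority scheduling is supported. */
	if ((info.dev_capa & RTE_DMA_CAPA_PRI_POLICY_SP) && info.nb_priorities > 1)
		conf.priority = 0;

	/* Silent mode can only be requested when the device supports it. */
	if (want_silent && (info.dev_capa & RTE_DMA_CAPA_SILENT))
		conf.flags |= RTE_DMA_CFG_FLAG_SILENT;

	return rte_dma_configure(dev_id, &conf);
}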
395 : :
396 : : /**
397 : : * Start a DMA device.
398 : : *
399 : : * The device start step is the last one, and consists of setting the DMA
400 : : * device to start accepting jobs.
401 : : *
402 : : * @param dev_id
403 : : * The identifier of the device.
404 : : *
405 : : * @return
406 : : * 0 on success. Otherwise negative value is returned.
407 : : */
408 : : int rte_dma_start(int16_t dev_id);
409 : :
410 : : /**
411 : : * Stop a DMA device.
412 : : *
413 : : * The device can be restarted with a call to rte_dma_start().
414 : : *
415 : : * @param dev_id
416 : : * The identifier of the device.
417 : : *
418 : : * @return
419 : : * 0 on success. Otherwise negative value is returned.
420 : : */
421 : : int rte_dma_stop(int16_t dev_id);
422 : :
423 : : /**
424 : : * Close a DMA device.
425 : : *
426 : : * The device cannot be restarted after this call.
427 : : *
428 : : * @param dev_id
429 : : * The identifier of the device.
430 : : *
431 : : * @return
432 : : * 0 on success. Otherwise negative value is returned.
433 : : */
434 : : int rte_dma_close(int16_t dev_id);
435 : :
436 : : /**
437 : : * DMA transfer direction defines.
438 : : *
439 : : * @see struct rte_dma_vchan_conf::direction
440 : : */
441 : : enum rte_dma_direction {
442 : : /** DMA transfer direction - from memory to memory.
443 : : * When the device supports inter-process or inter-OS domain transfers,
444 : : * the field `type` in `struct rte_dma_vchan_conf::domain`
445 : : * specifies the type of domain.
446 : : * For memory-to-memory transfers within the same domain or process,
447 : : * `type` should be set to `RTE_DMA_INTER_DOMAIN_NONE`.
448 : : *
449 : : * @see struct rte_dma_vchan_conf::direction
450 : : * @see struct rte_dma_inter_domain_param::type
451 : : */
452 : : RTE_DMA_DIR_MEM_TO_MEM,
453 : : /** DMA transfer direction - from memory to device.
454 : : * In a typical scenario, an SoC is installed in a host server as an
455 : : * iNIC through the PCIe interface. In this case, the SoC works in
456 : : * EP (endpoint) mode and can initiate a DMA move request from memory
457 : : * (which is SoC memory) to device (which is host memory).
458 : : *
459 : : * @see struct rte_dma_vchan_conf::direction
460 : : */
461 : : RTE_DMA_DIR_MEM_TO_DEV,
462 : : /** DMA transfer direction - from device to memory.
463 : : * In a typical scenario, an SoC is installed in a host server as an
464 : : * iNIC through the PCIe interface. In this case, the SoC works in
465 : : * EP (endpoint) mode and can initiate a DMA move request from device
466 : : * (which is host memory) to memory (which is SoC memory).
467 : : *
468 : : * @see struct rte_dma_vchan_conf::direction
469 : : */
470 : : RTE_DMA_DIR_DEV_TO_MEM,
471 : : /** DMA transfer direction - from device to device.
472 : : * In a typical scenario, an SoC is installed in a host server as an
473 : : * iNIC through the PCIe interface. In this case, the SoC works in
474 : : * EP (endpoint) mode and can initiate a DMA move request from device
475 : : * (which is host memory) to device (which is another host's memory).
476 : : *
477 : : * @see struct rte_dma_vchan_conf::direction
478 : : */
479 : : RTE_DMA_DIR_DEV_TO_DEV,
480 : : };
481 : :
482 : : /**
483 : : * DMA access port type defines.
484 : : *
485 : : * @see struct rte_dma_port_param::port_type
486 : : */
487 : : enum rte_dma_port_type {
488 : : RTE_DMA_PORT_NONE,
489 : : RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
490 : : };
491 : :
492 : : /**
493 : : * A structure used to describe DMA access port parameters.
494 : : *
495 : : * @see struct rte_dma_vchan_conf::src_port
496 : : * @see struct rte_dma_vchan_conf::dst_port
497 : : */
498 : : struct rte_dma_port_param {
499 : : /** The device access port type.
500 : : *
501 : : * @see enum rte_dma_port_type
502 : : */
503 : : enum rte_dma_port_type port_type;
504 : : union {
505 : : /** PCIe access port parameters.
506 : : *
507 : : * The following model shows SoC's PCIe module connects to
508 : : * multiple PCIe hosts and multiple endpoints. The PCIe module
509 : : * has an integrated DMA controller.
510 : : *
511 : : * If the DMA engine needs to access the memory of Host A, the access
512 : : * can be initiated by PF-1 in Core0, or by VF-0 of PF-0 in Core0.
513 : : *
514 : : * \code{.unparsed}
515 : : * System Bus
516 : : * | ----------PCIe module----------
517 : : * | Bus
518 : : * | Interface
519 : : * | ----- ------------------
520 : : * | | | | PCIe Core0 |
521 : : * | | | | | -----------
522 : : * | | | | PF-0 -- VF-0 | | Host A |
523 : : * | | |--------| |- VF-1 |--------| Root |
524 : : * | | | | PF-1 | | Complex |
525 : : * | | | | PF-2 | -----------
526 : : * | | | ------------------
527 : : * | | |
528 : : * | | | ------------------
529 : : * | | | | PCIe Core1 |
530 : : * | | | | | -----------
531 : : * | | | | PF-0 -- VF-0 | | Host B |
532 : : * |-----| |--------| PF-1 -- VF-0 |--------| Root |
533 : : * | | | | |- VF-1 | | Complex |
534 : : * | | | | PF-2 | -----------
535 : : * | | | ------------------
536 : : * | | |
537 : : * | | | ------------------
538 : : * | |DMA| | | ------
539 : : * | | | | |--------| EP |
540 : : * | | |--------| PCIe Core2 | ------
541 : : * | | | | | ------
542 : : * | | | | |--------| EP |
543 : : * | | | | | ------
544 : : * | ----- ------------------
545 : : *
546 : : * \endcode
547 : : *
548 : : * @note If some fields can not be supported by the
549 : : * hardware/driver, then the driver ignores those fields.
550 : : * Please check driver-specific documentation for limitations
551 : : * and capabilities.
552 : : */
553 : : __extension__
554 : : union {
555 : : struct {
556 : : uint64_t coreid : 4; /**< PCIe core id used. */
557 : : uint64_t pfid : 8; /**< PF id used. */
558 : : uint64_t vfen : 1; /**< VF enable bit. */
559 : : uint64_t vfid : 16; /**< VF id used. */
560 : : /** The pasid field in the TLP packet. */
561 : : uint64_t pasid : 20;
562 : : /** The attributes field in the TLP packet. */
563 : : uint64_t attr : 3;
564 : : /** The processing hint field in the TLP packet. */
565 : : uint64_t ph : 2;
566 : : /** The steering tag field in the TLP packet. */
567 : : uint64_t st : 16;
568 : : };
569 : : uint64_t val;
570 : : } pcie;
571 : : };
572 : : uint64_t reserved[2]; /**< Reserved for future fields. */
573 : : };
574 : :
575 : : /**
576 : : * A structure used for offload auto free params.
577 : : */
578 : : struct rte_dma_auto_free_param {
579 : : union {
580 : : struct {
581 : : /**
582 : : * Mempool from which buffer is allocated. Mempool info
583 : : * is used for freeing buffer by hardware.
584 : : *
585 : : * @note If the mempool is not supported by the DMA device,
586 : : * rte_dma_vchan_setup() will fail.
587 : : */
588 : : struct rte_mempool *pool;
589 : : } m2d;
590 : : };
591 : : /** Reserved for future fields. */
592 : : uint64_t reserved[2];
593 : : };
594 : :
595 : : /**
596 : : * Inter-DMA transfer domain type.
597 : : *
598 : : * @warning
599 : : * @b EXPERIMENTAL: this API may change without prior notice.
600 : : *
601 : : * This enum defines the types of transfer domains applicable to DMA operations.
602 : : * It helps categorize whether a DMA transfer is occurring within the same domain,
603 : : * across different processes, or between distinct operating system domains.
604 : : *
605 : : * @see struct rte_dma_inter_domain_param::type
606 : : */
607 : : enum rte_dma_inter_domain_type {
608 : : /** No inter-domain transfer; standard DMA within same domain. */
609 : : RTE_DMA_INTER_DOMAIN_NONE,
610 : : /** Transfer occurs between different user-space processes. */
611 : : RTE_DMA_INTER_PROCESS_DOMAIN,
612 : : /** Transfer spans across different operating system domains. */
613 : : RTE_DMA_INTER_OS_DOMAIN,
614 : : };
615 : :
616 : : /**
617 : : * Parameters for inter-process or inter-OS DMA transfers.
618 : : *
619 : : * @warning
620 : : * @b EXPERIMENTAL: this API may change without prior notice.
621 : : *
622 : : * This structure defines the parameters required to perform DMA transfers
623 : : * across different domains, such as between processes or operating systems.
624 : : * It includes the domain type and handler identifiers
625 : : * for both the source and destination domains.
626 : : *
627 : : * When the domain type is RTE_DMA_INTER_DOMAIN_NONE, the source and destination
628 : : * handlers are not used and the DMA operation is confined to the local process.
629 : : *
630 : : * For DMA transfers from the local process or OS domain to another process
631 : : * or OS domain, valid source and destination handlers must be provided.
632 : : */
633 : : struct rte_dma_inter_domain_param {
634 : : /** Type of inter-domain. */
635 : : enum rte_dma_inter_domain_type type;
636 : : /** Source domain handler identifier. */
637 : : uint16_t src_handler;
638 : : /** Destination domain handler identifier. */
639 : : uint16_t dst_handler;
640 : : /** Reserved for future fields. */
641 : : uint64_t reserved[2];
642 : : };
643 : :
644 : : /**
645 : : * A structure used to configure a virtual DMA channel.
646 : : *
647 : : * @see rte_dma_vchan_setup
648 : : */
649 : : struct rte_dma_vchan_conf {
650 : : /** Transfer direction
651 : : *
652 : : * @see enum rte_dma_direction
653 : : */
654 : : enum rte_dma_direction direction;
655 : : /** Number of descriptors for the virtual DMA channel. */
656 : : uint16_t nb_desc;
657 : : /** 1) Used to describe the device access port parameters in the
658 : : * device-to-memory transfer scenario.
659 : : * 2) Used to describe the source device access port parameters in the
660 : : * device-to-device transfer scenario.
661 : : *
662 : : * @see struct rte_dma_port_param
663 : : */
664 : : struct rte_dma_port_param src_port;
665 : : /** 1) Used to describe the device access port parameters in the
666 : : * memory-to-device transfer scenario.
667 : : * 2) Used to describe the destination device access port parameters in
668 : : * the device-to-device transfer scenario.
669 : : *
670 : : * @see struct rte_dma_port_param
671 : : */
672 : : struct rte_dma_port_param dst_port;
673 : : /** Buffer parameters for automatic freeing of the buffer by hardware. To
674 : : * have the buffer freed by hardware, RTE_DMA_OP_FLAG_AUTO_FREE must be set
675 : : * when calling rte_dma_copy() and rte_dma_copy_sg().
676 : : *
677 : : * @see RTE_DMA_OP_FLAG_AUTO_FREE
678 : : * @see struct rte_dma_auto_free_param
679 : : */
680 : : struct rte_dma_auto_free_param auto_free;
681 : : /** Parameters for inter-process or inter-OS domain DMA transfers.
682 : : * This field specifies the source and destination domain handlers
683 : : * required for DMA operations that span
684 : : * across different processes or operating system domains.
685 : : *
686 : : * @see RTE_DMA_CAPA_INTER_PROCESS_DOMAIN
687 : : * @see RTE_DMA_CAPA_INTER_OS_DOMAIN
688 : : * @see struct rte_dma_inter_domain_param
689 : : */
690 : : struct rte_dma_inter_domain_param domain;
691 : : };
692 : :
693 : : /**
694 : : * Allocate and set up a virtual DMA channel.
695 : : *
696 : : * @param dev_id
697 : : * The identifier of the device.
698 : : * @param vchan
699 : : * The identifier of virtual DMA channel. The value must be in the range
700 : : * [0, nb_vchans - 1] previously supplied to rte_dma_configure().
701 : : * @param conf
702 : : * The virtual DMA channel configuration structure encapsulated into
703 : : * rte_dma_vchan_conf object.
704 : : *
705 : : * @return
706 : : * 0 on success. Otherwise negative value is returned.
707 : : */
708 : : int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
709 : : const struct rte_dma_vchan_conf *conf);
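/*
 * Illustrative sketch: setting up virtual DMA channels on a device configured
 * with nb_vchans = 2. vchan 0 is a plain memory-to-memory channel; vchan 1 is
 * a memory-to-device channel whose PCIe port parameters (core/PF/VF ids) are
 * example values that depend entirely on the platform and driver.
 */
static inline int
dmadev_vchan_setup_example(int16_t dev_id)
{
	struct rte_dma_vchan_conf m2m = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024, /* keep within [min_desc, max_desc] from rte_dma_info */
	};
	struct rte_dma_vchan_conf m2d = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 1024,
	};
	int ret;

	/* The destination access port describes the PCIe target for MEM_TO_DEV. */
	m2d.dst_port.port_type = RTE_DMA_PORT_PCIE;
	m2d.dst_port.pcie.coreid = 0; /* example values only */
	m2d.dst_port.pcie.pfid = 1;
	m2d.dst_port.pcie.vfen = 0;

	ret = rte_dma_vchan_setup(dev_id, 0, &m2m);
	if (ret < 0)
		return ret;
	return rte_dma_vchan_setup(dev_id, 1, &m2d);
}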
710 : :
711 : : /**
712 : : * A structure used to retrieve statistics.
713 : : *
714 : : * @see rte_dma_stats_get
715 : : */
716 : : struct rte_dma_stats {
717 : : /** Count of operations which were submitted to hardware. */
718 : : uint64_t submitted;
719 : : /** Count of operations which were completed, including successful and
720 : : * failed completions.
721 : : */
722 : : uint64_t completed;
723 : : /** Count of operations which failed to complete. */
724 : : uint64_t errors;
725 : : };
726 : :
727 : : /**
728 : : * Special ID, which is used to represent all virtual DMA channels.
729 : : *
730 : : * @see rte_dma_stats_get
731 : : * @see rte_dma_stats_reset
732 : : */
733 : : #define RTE_DMA_ALL_VCHAN 0xFFFFu
734 : :
735 : : /**
736 : : * Retrieve basic statistics of one or all virtual DMA channel(s).
737 : : *
738 : : * @param dev_id
739 : : * The identifier of the device.
740 : : * @param vchan
741 : : * The identifier of virtual DMA channel.
742 : : * If set to RTE_DMA_ALL_VCHAN, all channels are covered.
743 : : * @param[out] stats
744 : : * The basic statistics structure encapsulated into rte_dma_stats
745 : : * object.
746 : : *
747 : : * @return
748 : : * 0 on success. Otherwise negative value is returned.
749 : : */
750 : : int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
751 : : struct rte_dma_stats *stats);
752 : :
753 : : /**
754 : : * Reset basic statistics of one or all virtual DMA channel(s).
755 : : *
756 : : * @param dev_id
757 : : * The identifier of the device.
758 : : * @param vchan
759 : : * The identifier of virtual DMA channel.
760 : : * If set to RTE_DMA_ALL_VCHAN, all channels are covered.
761 : : *
762 : : * @return
763 : : * 0 on success. Otherwise negative value is returned.
764 : : */
765 : : int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
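/*
 * Illustrative sketch: reading and clearing statistics, per channel and for
 * all channels at once via RTE_DMA_ALL_VCHAN.
 */
static inline int
dmadev_stats_example(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_stats vchan_stats, total_stats;
	int ret;

	ret = rte_dma_stats_get(dev_id, vchan, &vchan_stats);
	if (ret < 0)
		return ret;
	ret = rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &total_stats);
	if (ret < 0)
		return ret;

	/* 'completed' includes both successful and failed operations. */
	if (total_stats.completed == total_stats.submitted && total_stats.errors == 0) {
		/* every submitted operation has completed successfully */
	}

	/* Clear the counters of every virtual DMA channel. */
	return rte_dma_stats_reset(dev_id, RTE_DMA_ALL_VCHAN);
}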
766 : :
767 : : /**
768 : : * Device vchan status.
769 : : *
770 : : * Enum with the options for the channel status: idle, active, or halted due to error.
771 : : * @see rte_dma_vchan_status
772 : : */
773 : : enum rte_dma_vchan_status {
774 : : RTE_DMA_VCHAN_IDLE, /**< not processing, awaiting ops */
775 : : RTE_DMA_VCHAN_ACTIVE, /**< currently processing jobs */
776 : : RTE_DMA_VCHAN_HALTED_ERROR, /**< not processing due to error, cannot accept new ops */
777 : : };
778 : :
779 : : /**
780 : : * Determine if all jobs have completed on a device channel.
781 : : * This function is primarily designed for testing use, as it allows a process to check if
782 : : * all jobs are completed, without actually gathering completions from those jobs.
783 : : *
784 : : * @param dev_id
785 : : * The identifier of the device.
786 : : * @param vchan
787 : : * The identifier of virtual DMA channel.
788 : : * @param[out] status
789 : : * The vchan status
790 : : * @return
791 : : * 0 - call completed successfully
792 : : * < 0 - error code indicating there was a problem calling the API
793 : : */
794 : : int
795 : : rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status);
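/*
 * Illustrative sketch: draining a channel before stopping the device, using
 * rte_dma_vchan_status() to wait until no more jobs are in flight. It assumes
 * the application has already stopped submitting new jobs to this vchan.
 */
static inline int
dmadev_quiesce_example(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_vchan_status st;
	int ret;

	/* Wait for the channel to leave the ACTIVE state. */
	do {
		ret = rte_dma_vchan_status(dev_id, vchan, &st);
		if (ret < 0)
			return ret;
	} while (st == RTE_DMA_VCHAN_ACTIVE);

	if (st == RTE_DMA_VCHAN_HALTED_ERROR)
		return -EIO;

	return rte_dma_stop(dev_id);
}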
796 : :
797 : : /**
798 : : * Dump DMA device info.
799 : : *
800 : : * @param dev_id
801 : : * The identifier of the device.
802 : : * @param f
803 : : * The file to write the output to.
804 : : *
805 : : * @return
806 : : * 0 on success. Otherwise negative value is returned.
807 : : */
808 : : int rte_dma_dump(int16_t dev_id, FILE *f);
809 : :
810 : : /**
811 : : * Event types for DMA access pair group notifications.
812 : : *
813 : : * @warning
814 : : * @b EXPERIMENTAL: this API may change without prior notice.
815 : : *
816 : : * When the event type is RTE_DMA_GROUP_EVENT_MEMBER_LEFT,
817 : : * the handler associated with the departing member's domain is no longer valid.
818 : : * Inter-domain DMA operations targeting that domain should be avoided.
819 : : *
820 : : * When the event type is RTE_DMA_GROUP_EVENT_GROUP_DESTROYED,
821 : : * all handlers associated with the group become invalid.
822 : : * No further inter-domain DMA operations should be initiated using those handlers.
823 : : */
824 : : enum rte_dma_access_pair_group_event_type {
825 : : /** A member left the group (notifies creator and joiners). */
826 : : RTE_DMA_GROUP_EVENT_MEMBER_LEFT,
827 : : /** Group was destroyed (notifies joiners). */
828 : : RTE_DMA_GROUP_EVENT_GROUP_DESTROYED
829 : : };
830 : :
831 : : /**
832 : : * This callback is used to notify interested parties
833 : : * (either the group creator or group joiners)
834 : : * about significant events related to the lifecycle of a DMA access pair group.
835 : : *
836 : : * @warning
837 : : * @b EXPERIMENTAL: this API may change without prior notice.
838 : : *
839 : : * It can be registered by:
840 : : * - **Group creators or group joiners** to be notified when a member leaves the group.
841 : : * - **Group joiners** to be notified when the group is destroyed.
842 : : *
843 : : * @param dev_id
844 : : * Identifier of the DMA device.
845 : : * @param group_id
846 : : * Identifier of the access pair group where the event occurred.
847 : : * @param domain_id
848 : : * UUID of the domain_id associated with the event.
849 : : * For member leave events, this is the domain_id of the member that left.
850 : : * For group destruction events,
851 : : * this may refer to the domain_id of the respective member.
852 : : * @param event
853 : : * Type of event that occurred.
854 : : * @see rte_dma_access_pair_group_event_type
855 : : */
856 : : typedef void (*rte_dma_access_pair_group_event_cb_t)(int16_t dev_id,
857 : : int16_t group_id,
858 : : rte_uuid_t domain_id,
859 : : enum rte_dma_access_pair_group_event_type event);
860 : :
861 : : /**
862 : : * Create an access pair group to enable secure DMA transfers
863 : : * between devices across different processes or operating system domains.
864 : : *
865 : : * @warning
866 : : * @b EXPERIMENTAL: this API may change without prior notice.
867 : : *
868 : : * @param dev_id
869 : : * Identifier of the DMA device initiating the group.
870 : : * @param domain_id
871 : : * Unique identifier representing the current process or OS domain.
872 : : * @param token
873 : : * Authentication token used to establish the access group.
874 : : * @param[out] group_id
875 : : * Pointer to store the ID of the newly created access group.
876 : : * @param cb
877 : : * Callback function to be invoked when a member leaves the group.
878 : : *
879 : : * @return
880 : : * 0 on success,
881 : : * negative error code on failure.
882 : : */
883 : : __rte_experimental
884 : : int rte_dma_access_pair_group_create(int16_t dev_id, rte_uuid_t domain_id, rte_uuid_t token,
885 : : int16_t *group_id, rte_dma_access_pair_group_event_cb_t cb);
886 : :
887 : : /**
888 : : * Destroy an access pair group if all participating devices have exited.
889 : : *
890 : : * @warning
891 : : * @b EXPERIMENTAL: this API may change without prior notice.
892 : : *
893 : : * This operation is only permitted by the device that originally created the group;
894 : : * attempts by other devices will result in failure.
895 : : *
896 : : * @param dev_id
897 : : * Identifier of the device requesting group destruction.
898 : : * @param group_id
899 : : * ID of the access group to be destroyed.
900 : : * @return
901 : : * 0 on success,
902 : : * negative value on failure indicating the error code.
903 : : */
904 : : __rte_experimental
905 : : int rte_dma_access_pair_group_destroy(int16_t dev_id, int16_t group_id);
906 : :
907 : : /**
908 : : * Join an existing access group to enable secure DMA transfers
909 : : * between devices across different processes or OS domains.
910 : : *
911 : : * @warning
912 : : * @b EXPERIMENTAL: this API may change without prior notice.
913 : : *
914 : : * @param dev_id
915 : : * Identifier of the DMA device attempting to join the group.
916 : : * @param domain_id
917 : : * Unique identifier representing the current process or OS domain.
918 : : * @param token
919 : : * Authentication token used to validate group membership.
920 : : * @param group_id
921 : : * ID of the access group to join.
922 : : * @param cb
923 : : * Callback function to be invoked when the device leaves the group
924 : : * or when the group is destroyed due to some exception or failure.
925 : : *
926 : : * @return
927 : : * 0 on success,
928 : : * negative value on failure indicating the error code.
929 : : */
930 : : __rte_experimental
931 : : int rte_dma_access_pair_group_join(int16_t dev_id, rte_uuid_t domain_id, rte_uuid_t token,
932 : : int16_t group_id, rte_dma_access_pair_group_event_cb_t cb);
933 : :
934 : : /**
935 : : * Leave an access group, removing the device's entry from the group table
936 : : * and disabling inter-domain DMA transfers to and from this device.
937 : : *
938 : : * @warning
939 : : * @b EXPERIMENTAL: this API may change without prior notice.
940 : : *
941 : : * This operation is not permitted for the device that originally created the group.
942 : : *
943 : : * @param dev_id
944 : : * Identifier of the device requesting to leave the group.
945 : : * @param group_id
946 : : * ID of the access group to leave.
947 : : * @return
948 : : * 0 on success,
949 : : * negative value on failure indicating the error code.
950 : : */
951 : : __rte_experimental
952 : : int rte_dma_access_pair_group_leave(int16_t dev_id, int16_t group_id);
953 : :
954 : : /**
955 : : * Retrieve the handler associated with a specific domain ID,
956 : : * which is used by the application to query the source or destination handler
957 : : * needed to initiate inter-process or inter-OS DMA transfers.
958 : : *
959 : : * @warning
960 : : * @b EXPERIMENTAL: this API may change without prior notice.
961 : : *
962 : : * @param dev_id
963 : : * Identifier of the DMA device requesting the handler.
964 : : * @param group_id
965 : : * ID of the access group to query.
966 : : * @param domain_id
967 : : * Unique identifier of the target process or OS domain.
968 : : * @param[out] handler
969 : : * Pointer to store the retrieved handler value.
970 : : * @return
971 : : * 0 on success,
972 : : * negative value on failure indicating the error code.
973 : : */
974 : : __rte_experimental
975 : : int rte_dma_access_pair_group_handler_get(int16_t dev_id, int16_t group_id, rte_uuid_t domain_id,
976 : : uint16_t *handler);
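/*
 * Illustrative sketch of the experimental access pair group flow: the creator
 * process builds a group, a peer joins with the same token, and the creator
 * resolves handlers to fill the inter-domain parameters of a vchan
 * configuration. UUIDs, the token, and how group_id/peer_domain are shared
 * between processes are application-defined assumptions of this example.
 */
static void
dmadev_group_event_example_cb(int16_t dev_id, int16_t group_id, rte_uuid_t domain_id,
			      enum rte_dma_access_pair_group_event_type event)
{
	/* A real application would invalidate handlers cached for domain_id
	 * (member left) or for the whole group (group destroyed).
	 */
	RTE_SET_USED(dev_id);
	RTE_SET_USED(group_id);
	RTE_SET_USED(domain_id);
	RTE_SET_USED(event);
}

static inline int
dmadev_inter_domain_example(int16_t dev_id, rte_uuid_t local_domain,
			    rte_uuid_t peer_domain, rte_uuid_t token,
			    struct rte_dma_vchan_conf *conf)
{
	int16_t group_id;
	uint16_t src_handler, dst_handler;
	int ret;

	/* The peer process is expected to call rte_dma_access_pair_group_join()
	 * on its own device with the same token and the group_id shared out of band.
	 */
	ret = rte_dma_access_pair_group_create(dev_id, local_domain, token,
					       &group_id, dmadev_group_event_example_cb);
	if (ret < 0)
		return ret;

	ret = rte_dma_access_pair_group_handler_get(dev_id, group_id, local_domain, &src_handler);
	if (ret < 0)
		return ret;
	ret = rte_dma_access_pair_group_handler_get(dev_id, group_id, peer_domain, &dst_handler);
	if (ret < 0)
		return ret;

	/* Local memory is the source; the peer's memory is the destination. */
	conf->direction = RTE_DMA_DIR_MEM_TO_MEM;
	conf->domain.type = RTE_DMA_INTER_PROCESS_DOMAIN;
	conf->domain.src_handler = src_handler;
	conf->domain.dst_handler = dst_handler;
	return 0;
}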
977 : :
978 : : /**
979 : : * DMA transfer result status code defines.
980 : : *
981 : : * @see rte_dma_completed_status
982 : : */
983 : : enum rte_dma_status_code {
984 : : /** The operation completed successfully. */
985 : : RTE_DMA_STATUS_SUCCESSFUL,
986 : : /** The operation failed to complete due to an abort by the user.
987 : : * This is mainly used when processing dev_stop: the user can modify the
988 : : * descriptors (e.g. change one bit to tell hardware to abort this job),
989 : : * which allows outstanding requests to complete as far as possible and
990 : : * so reduces the time needed to stop the device.
991 : : */
992 : : RTE_DMA_STATUS_USER_ABORT,
993 : : /** The operation failed to complete due to the following scenario:
994 : : * the jobs in a particular batch are not attempted because they
995 : : * appeared after a fence where a previous job failed. In some HW
996 : : * implementations it is possible for jobs from later batches to be
997 : : * completed, though, so the status of the not-attempted jobs is reported
998 : : * before that of those newer completed jobs.
999 : : */
1000 : : RTE_DMA_STATUS_NOT_ATTEMPTED,
1001 : : /** The operation failed to complete due to an invalid source address. */
1002 : : RTE_DMA_STATUS_INVALID_SRC_ADDR,
1003 : : /** The operation failed to complete due to an invalid destination address. */
1004 : : RTE_DMA_STATUS_INVALID_DST_ADDR,
1005 : : /** The operation failed to complete due to an invalid source or destination
1006 : : * address, covering the case where only an address error is known, but
1007 : : * not which address is in error.
1008 : : */
1009 : : RTE_DMA_STATUS_INVALID_ADDR,
1010 : : /** The operation failed to complete due to an invalid length. */
1011 : : RTE_DMA_STATUS_INVALID_LENGTH,
1012 : : /** The operation failed to complete due to an invalid opcode.
1013 : : * The DMA descriptor can have multiple formats, which are
1014 : : * distinguished by the opcode field.
1015 : : */
1016 : : RTE_DMA_STATUS_INVALID_OPCODE,
1017 : : /** The operation failed to complete due to a bus read error. */
1018 : : RTE_DMA_STATUS_BUS_READ_ERROR,
1019 : : /** The operation failed to complete due to a bus write error. */
1020 : : RTE_DMA_STATUS_BUS_WRITE_ERROR,
1021 : : /** The operation failed to complete due to a bus error, covering the case
1022 : : * where only a bus error is known, but not the direction of the error.
1023 : : */
1024 : : RTE_DMA_STATUS_BUS_ERROR,
1025 : : /** The operation failed to complete due to data poison. */
1026 : : RTE_DMA_STATUS_DATA_POISION,
1027 : : /** The operation failed to complete due to a descriptor read error. */
1028 : : RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
1029 : : /** The operation failed to complete due to a device link error.
1030 : : * Used to indicate a link error in the memory-to-device/
1031 : : * device-to-memory/device-to-device transfer scenarios.
1032 : : */
1033 : : RTE_DMA_STATUS_DEV_LINK_ERROR,
1034 : : /** The operation failed to complete due to a lookup page fault. */
1035 : : RTE_DMA_STATUS_PAGE_FAULT,
1036 : : /** The operation failed to complete due to an unknown reason.
1037 : : * The initial value is 256, which reserves space for future errors.
1038 : : */
1039 : : RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
1040 : : };
1041 : :
1042 : : /**
1043 : : * A structure used to hold scatter-gather DMA operation request entry.
1044 : : *
1045 : : * @see rte_dma_copy_sg
1046 : : */
1047 : : struct rte_dma_sge {
1048 : : rte_iova_t addr; /**< The DMA operation address. */
1049 : : uint32_t length; /**< The DMA operation length. */
1050 : : };
1051 : :
1052 : : /**
1053 : : * A structure used to hold an event-based DMA operation entry.
1054 : : * All the information required for a DMA transfer
1055 : : * shall be populated in a "struct rte_dma_op" instance.
1056 : : */
1057 : : struct rte_dma_op {
1058 : : /** Flags related to the operation.
1059 : : * @see RTE_DMA_OP_FLAG_*
1060 : : */
1061 : : uint64_t flags;
1062 : : /** Mempool from which op is allocated. */
1063 : : struct rte_mempool *op_mp;
1064 : : /** Status code for this operation. */
1065 : : enum rte_dma_status_code status;
1066 : : /** Reserved for future use. */
1067 : : uint32_t rsvd;
1068 : : /** Implementation-specific opaque data.
1069 : : * A DMA device implementation uses this field to hold
1070 : : * implementation-specific values
1071 : : * shared between enqueue and dequeue operations.
1072 : : * The application should not modify this field.
1073 : : */
1074 : : uint64_t impl_opaque[2];
1075 : : /** Memory to store user specific metadata.
1076 : : * The DMA device implementation should not modify this area.
1077 : : */
1078 : : uint64_t user_meta;
1079 : : /** Event metadata of DMA completion event.
1080 : : * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
1081 : : * is not supported in OP_NEW mode.
1082 : : * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_NEW
1083 : : * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
1084 : : *
1085 : : * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
1086 : : * is not supported in OP_FWD mode.
1087 : : * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
1088 : : * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
1089 : : *
1090 : : * @see struct rte_event::event
1091 : : */
1092 : : uint64_t event_meta;
1093 : : /** DMA device ID to be used with OP_FORWARD mode.
1094 : : * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
1095 : : */
1096 : : int16_t dma_dev_id;
1097 : : /** DMA vchan ID to be used with OP_FORWARD mode
1098 : : * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
1099 : : */
1100 : : uint16_t vchan;
1101 : : /** Number of source segments. */
1102 : : uint16_t nb_src;
1103 : : /** Number of destination segments. */
1104 : : uint16_t nb_dst;
1105 : : /** Source and destination segments. */
1106 : : struct rte_dma_sge src_dst_seg[];
1107 : : };
1108 : :
1109 : : #ifdef __cplusplus
1110 : : }
1111 : : #endif
1112 : :
1113 : : #include "rte_dmadev_core.h"
1114 : : #include "rte_dmadev_trace_fp.h"
1115 : :
1116 : : #ifdef __cplusplus
1117 : : extern "C" {
1118 : : #endif
1119 : :
1120 : : /**@{@name DMA operation flag
1121 : : * @see rte_dma_copy()
1122 : : * @see rte_dma_copy_sg()
1123 : : * @see rte_dma_fill()
1124 : : */
1125 : : /** Fence flag.
1126 : : * An operation with this flag must be processed only after all
1127 : : * previous operations have completed.
1128 : : * If the specified DMA HW works in-order (i.e. it has an implicit fence between
1129 : : * operations), this flag can be a no-op.
1130 : : */
1131 : : #define RTE_DMA_OP_FLAG_FENCE RTE_BIT64(0)
1132 : : /** Submit flag.
1133 : : * An operation with this flag must issue the doorbell to hardware after
1134 : : * the jobs have been enqueued.
1135 : : */
1136 : : #define RTE_DMA_OP_FLAG_SUBMIT RTE_BIT64(1)
1137 : : /** Hint to write data to the low-level cache.
1138 : : * Used for performance optimization; this is just a hint, there is no
1139 : : * capability bit for it, and the driver must not return an error if this flag is set.
1140 : : */
1141 : : #define RTE_DMA_OP_FLAG_LLC RTE_BIT64(2)
1142 : : /** Auto free buffer flag.
1143 : : * An operation with this flag must issue a command to hardware to free the DMA
1144 : : * buffer after the DMA transfer has completed.
1145 : : *
1146 : : * @see struct rte_dma_vchan_conf::auto_free
1147 : : */
1148 : : #define RTE_DMA_OP_FLAG_AUTO_FREE RTE_BIT64(3)
1149 : : /**@}*/
1150 : :
1151 : : /**
1152 : : * Enqueue a copy operation onto the virtual DMA channel.
1153 : : *
1154 : : * This queues up a copy operation to be performed by hardware. If the 'flags'
1155 : : * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to begin
1156 : : * this operation; otherwise the doorbell is not triggered.
1157 : : *
1158 : : * @param dev_id
1159 : : * The identifier of the device.
1160 : : * @param vchan
1161 : : * The identifier of virtual DMA channel.
1162 : : * @param src
1163 : : * The address of the source buffer.
1164 : : * @param dst
1165 : : * The address of the destination buffer.
1166 : : * @param length
1167 : : * The length of the data to be copied.
1168 : : * @param flags
1169 : : * Flags for this operation.
1170 : : * @see RTE_DMA_OP_FLAG_*
1171 : : *
1172 : : * @return
1173 : : * - 0..UINT16_MAX: index of enqueued job.
1174 : : * - -ENOSPC: if no space left to enqueue.
1175 : : * - other values < 0 on failure.
1176 : : */
1177 : : static inline int
1178 : : rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
1179 : : uint32_t length, uint64_t flags)
1180 : : {
1181 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1182 : : int ret;
1183 : :
1184 : : #ifdef RTE_DMADEV_DEBUG
1185 : : if (!rte_dma_is_valid(dev_id) || length == 0)
1186 : : return -EINVAL;
1187 : : if (obj->copy == NULL)
1188 : : return -ENOTSUP;
1189 : : #endif
1190 : :
1191 : 0 : ret = obj->copy(obj->dev_private, vchan, src, dst, length, flags);
1192 : : rte_dma_trace_copy(dev_id, vchan, src, dst, length, flags, ret);
1193 : :
1194 : : return ret;
1195 : : }
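/*
 * Illustrative sketch: enqueueing a burst of copies and ringing the doorbell
 * once at the end (see the note on the submit flag in the file header). The
 * srcs/dsts arrays are assumed to hold valid DMA addresses.
 */
static inline int
dmadev_copy_burst_example(int16_t dev_id, uint16_t vchan,
			  const rte_iova_t *srcs, const rte_iova_t *dsts,
			  const uint32_t *lengths, uint16_t nb_jobs)
{
	uint16_t i;
	int ret;

	for (i = 0; i < nb_jobs; i++) {
		ret = rte_dma_copy(dev_id, vchan, srcs[i], dsts[i], lengths[i], 0);
		if (ret == -ENOSPC)
			break; /* ring full: submit what we have and retry later */
		if (ret < 0)
			return ret;
	}

	/* A single doorbell covers every job enqueued above. */
	ret = rte_dma_submit(dev_id, vchan);
	if (ret < 0)
		return ret;
	return i; /* number of jobs actually enqueued */
}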
1196 : :
1197 : : /**
1198 : : * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
1199 : : *
1200 : : * This queues up a scatter-gather list copy operation to be performed by
1201 : : * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
1202 : : * doorbell is triggered to begin this operation; otherwise it is not triggered.
1203 : : *
1204 : : * @param dev_id
1205 : : * The identifier of the device.
1206 : : * @param vchan
1207 : : * The identifier of virtual DMA channel.
1208 : : * @param src
1209 : : * The pointer of source scatter-gather entry array.
1210 : : * @param dst
1211 : : * The pointer of destination scatter-gather entry array.
1212 : : * @param nb_src
1213 : : * The number of source scatter-gather entries.
1214 : : * @see struct rte_dma_info::max_sges
1215 : : * @param nb_dst
1216 : : * The number of destination scatter-gather entries.
1217 : : * @see struct rte_dma_info::max_sges
1218 : : * @param flags
1219 : : * Flags for this operation.
1220 : : * @see RTE_DMA_OP_FLAG_*
1221 : : *
1222 : : * @return
1223 : : * - 0..UINT16_MAX: index of enqueued job.
1224 : : * - -ENOSPC: if no space left to enqueue.
1225 : : * - other values < 0 on failure.
1226 : : */
1227 : : static inline int
1228 : : rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
1229 : : struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
1230 : : uint64_t flags)
1231 : : {
1232 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1233 : : int ret;
1234 : :
1235 : : #ifdef RTE_DMADEV_DEBUG
1236 : : if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
1237 : : nb_src == 0 || nb_dst == 0)
1238 : : return -EINVAL;
1239 : : if (obj->copy_sg == NULL)
1240 : : return -ENOTSUP;
1241 : : #endif
1242 : :
1243 : 0 : ret = obj->copy_sg(obj->dev_private, vchan, src, dst, nb_src, nb_dst, flags);
1244 : : rte_dma_trace_copy_sg(dev_id, vchan, src, dst, nb_src, nb_dst, flags,
1245 : : ret);
1246 : :
1247 : : return ret;
1248 : : }
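/*
 * Illustrative sketch: gathering two source segments into one destination
 * buffer with rte_dma_copy_sg(). Addresses are assumed valid and the segment
 * counts are assumed to be within rte_dma_info::max_sges for the device.
 */
static inline int
dmadev_copy_sg_example(int16_t dev_id, uint16_t vchan,
		       rte_iova_t src0, uint32_t len0,
		       rte_iova_t src1, uint32_t len1,
		       rte_iova_t dst)
{
	struct rte_dma_sge src[2] = {
		{ .addr = src0, .length = len0 },
		{ .addr = src1, .length = len1 },
	};
	struct rte_dma_sge dst_sge = { .addr = dst, .length = len0 + len1 };

	/* Enqueue and ring the doorbell in one call. */
	return rte_dma_copy_sg(dev_id, vchan, src, &dst_sge, 2, 1,
			       RTE_DMA_OP_FLAG_SUBMIT);
}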
1249 : :
1250 : : /**
1251 : : * Enqueue a fill operation onto the virtual DMA channel.
1252 : : *
1253 : : * This queues up a fill operation to be performed by hardware. If the 'flags'
1254 : : * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to begin
1255 : : * this operation; otherwise the doorbell is not triggered.
1256 : : *
1257 : : * @param dev_id
1258 : : * The identifier of the device.
1259 : : * @param vchan
1260 : : * The identifier of virtual DMA channel.
1261 : : * @param pattern
1262 : : * The pattern to populate the destination buffer with.
1263 : : * @param dst
1264 : : * The address of the destination buffer.
1265 : : * @param length
1266 : : * The length of the destination buffer.
1267 : : * @param flags
1268 : : * Flags for this operation.
1269 : : * @see RTE_DMA_OP_FLAG_*
1270 : : *
1271 : : * @return
1272 : : * - 0..UINT16_MAX: index of enqueued job.
1273 : : * - -ENOSPC: if no space left to enqueue.
1274 : : * - other values < 0 on failure.
1275 : : */
1276 : : static inline int
1277 : : rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
1278 : : rte_iova_t dst, uint32_t length, uint64_t flags)
1279 : : {
1280 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1281 : : int ret;
1282 : :
1283 : : #ifdef RTE_DMADEV_DEBUG
1284 : : if (!rte_dma_is_valid(dev_id) || length == 0)
1285 : : return -EINVAL;
1286 : : if (obj->fill == NULL)
1287 : : return -ENOTSUP;
1288 : : #endif
1289 : :
1290 : 0 : ret = obj->fill(obj->dev_private, vchan, pattern, dst, length, flags);
1291 : : rte_dma_trace_fill(dev_id, vchan, pattern, dst, length, flags, ret);
1292 : :
1293 : : return ret;
1294 : : }
1295 : :
1296 : : /**
1297 : : * Trigger hardware to begin performing enqueued operations.
1298 : : *
1299 : : * Writes the "doorbell" to the hardware to trigger it
1300 : : * to begin the operations previously enqueued by rte_dma_copy/fill().
1301 : : *
1302 : : * @param dev_id
1303 : : * The identifier of the device.
1304 : : * @param vchan
1305 : : * The identifier of virtual DMA channel.
1306 : : *
1307 : : * @return
1308 : : * 0 on success. Otherwise negative value is returned.
1309 : : */
1310 : : static inline int
1311 : : rte_dma_submit(int16_t dev_id, uint16_t vchan)
1312 : : {
1313 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1314 : : int ret;
1315 : :
1316 : : #ifdef RTE_DMADEV_DEBUG
1317 : : if (!rte_dma_is_valid(dev_id))
1318 : : return -EINVAL;
1319 : : if (obj->submit == NULL)
1320 : : return -ENOTSUP;
1321 : : #endif
1322 : :
1323 : 0 : ret = obj->submit(obj->dev_private, vchan);
1324 : : rte_dma_trace_submit(dev_id, vchan, ret);
1325 : :
1326 : 0 : return ret;
1327 : : }
1328 : :
1329 : : /**
1330 : : * Return the number of operations that have been successfully completed.
1331 : : * Once an operation has been reported as completed, the results of that
1332 : : * operation will be visible to all cores on the system.
1333 : : *
1334 : : * @param dev_id
1335 : : * The identifier of the device.
1336 : : * @param vchan
1337 : : * The identifier of virtual DMA channel.
1338 : : * @param nb_cpls
1339 : : * The maximum number of completed operations that can be processed.
1340 : : * @param[out] last_idx
1341 : : * The last completed operation's ring_idx.
1342 : : * If not required, NULL can be passed in.
1343 : : * @param[out] has_error
1344 : : * Indicates if there was a transfer error.
1345 : : * If not required, NULL can be passed in.
1346 : : *
1347 : : * @return
1348 : : * The number of operations that successfully completed. This return value
1349 : : * must be less than or equal to the value of nb_cpls.
1350 : : */
1351 : : static inline uint16_t
1352 : : rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
1353 : : uint16_t *last_idx, bool *has_error)
1354 : : {
1355 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1356 : : uint16_t idx, ret;
1357 : : bool err;
1358 : :
1359 : : #ifdef RTE_DMADEV_DEBUG
1360 : : if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
1361 : : return 0;
1362 : : if (obj->completed == NULL)
1363 : : return 0;
1364 : : #endif
1365 : :
1366 : : /* Ensure the pointer values are non-null to simplify drivers.
1367 : : * In most cases these should be compile time evaluated, since this is
1368 : : * an inline function.
1369 : : * - If NULL is explicitly passed as parameter, then compiler knows the
1370 : : * value is NULL
1371 : : * - If address of local variable is passed as parameter, then compiler
1372 : : * can know it's non-NULL.
1373 : : */
1374 : : if (last_idx == NULL)
1375 : : last_idx = &idx;
1376 : : if (has_error == NULL)
1377 : : has_error = &err;
1378 : :
1379 : 0 : *has_error = false;
1380 : 0 : ret = obj->completed(obj->dev_private, vchan, nb_cpls, last_idx, has_error);
1381 : : rte_dma_trace_completed(dev_id, vchan, nb_cpls, last_idx, has_error,
1382 : : ret);
1383 : :
1384 : : return ret;
1385 : : }
1386 : :
1387 : : /**
1388 : : * Return the number of operations that have been completed, whether those
1389 : : * operations succeeded or failed.
1390 : : * Once an operation has been reported as completed successfully, the results of that
1391 : : * operation will be visible to all cores on the system.
1392 : : *
1393 : : * @param dev_id
1394 : : * The identifier of the device.
1395 : : * @param vchan
1396 : : * The identifier of virtual DMA channel.
1397 : : * @param nb_cpls
1398 : : * Indicates the size of status array.
1399 : : * @param[out] last_idx
1400 : : * The last completed operation's ring_idx.
1401 : : * If not required, NULL can be passed in.
1402 : : * @param[out] status
1403 : : * This is a pointer to an array of length 'nb_cpls' that holds the completion
1404 : : * status code of each operation.
1405 : : * @see enum rte_dma_status_code
1406 : : *
1407 : : * @return
1408 : : * The number of operations that completed. This return value must be less
1409 : : * than or equal to the value of nb_cpls.
1410 : : * If this number is greater than zero (assuming n), then n values in the
1411 : : * status array are also set.
1412 : : */
1413 : : static inline uint16_t
1414 : : rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
1415 : : const uint16_t nb_cpls, uint16_t *last_idx,
1416 : : enum rte_dma_status_code *status)
1417 : : {
1418 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1419 : : uint16_t idx, ret;
1420 : :
1421 : : #ifdef RTE_DMADEV_DEBUG
1422 : : if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
1423 : : return 0;
1424 : : if (obj->completed_status == NULL)
1425 : : return 0;
1426 : : #endif
1427 : :
1428 : : if (last_idx == NULL)
1429 : : last_idx = &idx;
1430 : :
1431 : 0 : ret = obj->completed_status(obj->dev_private, vchan, nb_cpls, last_idx, status);
1432 : : rte_dma_trace_completed_status(dev_id, vchan, nb_cpls, last_idx, status,
1433 : : ret);
1434 : :
1435 : : return ret;
1436 : : }
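/*
 * Illustrative sketch of the usual completion pattern: rte_dma_completed() is
 * used on the fast path; when it flags an error, rte_dma_completed_status()
 * is used to retrieve per-operation status codes. The burst size is an example.
 */
static inline void
dmadev_completion_example(int16_t dev_id, uint16_t vchan)
{
	enum { EXAMPLE_BURST = 32 };
	enum rte_dma_status_code status[EXAMPLE_BURST];
	uint16_t n, i, last_idx;
	bool has_error = false;

	n = rte_dma_completed(dev_id, vchan, EXAMPLE_BURST, &last_idx, &has_error);
	/* n operations finished successfully; last_idx is the newest ring_idx. */

	if (has_error) {
		/* Drain the faulting operations and inspect each status code. */
		n = rte_dma_completed_status(dev_id, vchan, EXAMPLE_BURST,
					     &last_idx, status);
		for (i = 0; i < n; i++) {
			if (status[i] != RTE_DMA_STATUS_SUCCESSFUL) {
				/* e.g. log status[i] and recycle the buffer */
			}
		}
	}
}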
1437 : :
1438 : : /**
1439 : : * Check remaining capacity in descriptor ring for the current burst.
1440 : : *
1441 : : * @param dev_id
1442 : : * The identifier of the device.
1443 : : * @param vchan
1444 : : * The identifier of virtual DMA channel.
1445 : : *
1446 : : * @return
1447 : : * - Remaining space in the descriptor ring for the current burst.
1448 : : * - 0 on error
1449 : : */
1450 : : static inline uint16_t
1451 : : rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
1452 : : {
1453 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1454 : : uint16_t ret;
1455 : :
1456 : : #ifdef RTE_DMADEV_DEBUG
1457 : : if (!rte_dma_is_valid(dev_id))
1458 : : return 0;
1459 : : if (obj->burst_capacity == NULL)
1460 : : return 0;
1461 : : #endif
1462 : 0 : ret = obj->burst_capacity(obj->dev_private, vchan);
1463 : : rte_dma_trace_burst_capacity(dev_id, vchan, ret);
1464 : :
1465 : : return ret;
1466 : : }
1467 : :
1468 : : /**
1469 : : * Enqueue rte_dma_ops to the DMA device. This can only be used if the underlying
1470 : : * device supports RTE_DMA_CAPA_OPS_ENQ_DEQ and RTE_DMA_CFG_FLAG_ENQ_DEQ is set
1471 : : * in rte_dma_conf::flags passed to rte_dma_configure().
1472 : : * The ops enqueued are immediately submitted to the DMA device.
1473 : : * Enqueue must be coupled with dequeue to retrieve completed ops;
1474 : : * calls to rte_dma_submit(), rte_dma_completed() and rte_dma_completed_status()
1475 : : * are not valid in this mode.
1476 : : *
1477 : : * @param dev_id
1478 : : * The identifier of the device.
1479 : : * @param vchan
1480 : : * The identifier of virtual DMA channel.
1481 : : * @param ops
1482 : : * Pointer to rte_dma_op array.
1483 : : * @param nb_ops
1484 : : * Number of rte_dma_op entries in the ops array.
1485 : : * @return
1486 : : * Number of successfully submitted ops.
1487 : : */
1488 : : static inline uint16_t
1489 : : rte_dma_enqueue_ops(int16_t dev_id, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
1490 : : {
1491 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1492 : : uint16_t ret;
1493 : :
1494 : : #ifdef RTE_DMADEV_DEBUG
1495 : : if (!rte_dma_is_valid(dev_id))
1496 : : return 0;
1497 : : if (*obj->enqueue == NULL)
1498 : : return 0;
1499 : : #endif
1500 : :
1501 : 0 : ret = (*obj->enqueue)(obj->dev_private, vchan, ops, nb_ops);
1502 : : rte_dma_trace_enqueue_ops(dev_id, vchan, (void **)ops, nb_ops);
1503 : :
1504 : : return ret;
1505 : : }
1506 : :
1507 : : /**
1508 : : * Dequeue completed rte_dma_ops submitted to the DMA device. This can only be used
1509 : : * if the underlying device supports RTE_DMA_CAPA_OPS_ENQ_DEQ and RTE_DMA_CFG_FLAG_ENQ_DEQ
1510 : : * is set in rte_dma_conf::flags passed to rte_dma_configure().
1511 : : *
1512 : : * @param dev_id
1513 : : * The identifier of the device.
1514 : : * @param vchan
1515 : : * The identifier of virtual DMA channel.
1516 : : * @param ops
1517 : : * Pointer to rte_dma_op array.
1518 : : * @param nb_ops
1519 : : * Size of rte_dma_op array.
1520 : : * @return
1521 : : * Number of successfully completed ops. Should be less than or equal to nb_ops.
1522 : : */
1523 : : static inline uint16_t
1524 : : rte_dma_dequeue_ops(int16_t dev_id, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
1525 : : {
1526 : 0 : struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1527 : : uint16_t ret;
1528 : :
1529 : : #ifdef RTE_DMADEV_DEBUG
1530 : : if (!rte_dma_is_valid(dev_id))
1531 : : return 0;
1532 : : if (*obj->dequeue == NULL)
1533 : : return 0;
1534 : : #endif
1535 : :
1536 : 0 : ret = (*obj->dequeue)(obj->dev_private, vchan, ops, nb_ops);
1537 : : rte_dma_trace_dequeue_ops(dev_id, vchan, (void **)ops, nb_ops);
1538 : :
1539 : : return ret;
1540 : : }
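/*
 * Illustrative sketch of the enqueue/dequeue operation model. It assumes the
 * device was configured with RTE_DMA_CFG_FLAG_ENQ_DEQ, that op_mp is a mempool
 * whose element size covers struct rte_dma_op plus two rte_dma_sge entries,
 * that rte_mempool.h is included by the application, and that src_dst_seg
 * holds the source segments followed by the destination segments.
 */
static inline int
dmadev_enq_deq_example(int16_t dev_id, uint16_t vchan, struct rte_mempool *op_mp,
		       rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	struct rte_dma_op *op, *done;
	uint16_t n;
	int ret;

	if (rte_mempool_get(op_mp, (void **)&op) != 0)
		return -ENOMEM;

	op->flags = 0;
	op->op_mp = op_mp;
	op->nb_src = 1;
	op->nb_dst = 1;
	op->src_dst_seg[0].addr = src;   /* source segment(s) first ... */
	op->src_dst_seg[0].length = len;
	op->src_dst_seg[1].addr = dst;   /* ... then destination segment(s) */
	op->src_dst_seg[1].length = len;

	/* Ops are submitted immediately; no rte_dma_submit() in this mode. */
	if (rte_dma_enqueue_ops(dev_id, vchan, &op, 1) != 1) {
		rte_mempool_put(op_mp, op);
		return -ENOSPC;
	}

	/* Poll for the completed op and check its status. */
	do {
		n = rte_dma_dequeue_ops(dev_id, vchan, &done, 1);
	} while (n == 0);

	ret = (done->status == RTE_DMA_STATUS_SUCCESSFUL) ? 0 : -EIO;
	rte_mempool_put(op_mp, done);
	return ret;
}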
1541 : :
1542 : : #ifdef __cplusplus
1543 : : }
1544 : : #endif
1545 : :
1546 : : #endif /* RTE_DMADEV_H */
|