Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (c) 2023 Marvell.
3 : : */
4 : :
5 : : #include "test.h"
6 : : #include <string.h>
7 : : #include <rte_common.h>
8 : : #include <rte_malloc.h>
9 : : #include <rte_mempool.h>
10 : : #include <rte_mbuf.h>
11 : : #include <rte_random.h>
12 : :
13 : : #ifdef RTE_EXEC_ENV_WINDOWS
/* Windows stub: the event DMA adapter is not supported on Windows,
 * so the autotest is reported as skipped.
 */
static int
test_event_dma_adapter(void)
{
	printf("event_dma_adapter not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}
20 : :
21 : : #else
22 : :
23 : : #include <rte_bus_vdev.h>
24 : : #include <rte_dmadev.h>
25 : : #include <rte_eventdev.h>
26 : : #include <rte_event_dma_adapter.h>
27 : : #include <rte_service.h>
28 : :
29 : : #define NUM_MBUFS (8191)
30 : : #define MBUF_CACHE_SIZE (256)
31 : : #define TEST_APP_PORT_ID 0
32 : : #define TEST_APP_EV_QUEUE_ID 0
33 : : #define TEST_APP_EV_PRIORITY 0
34 : : #define TEST_APP_EV_FLOWID 0xAABB
35 : : #define TEST_DMA_EV_QUEUE_ID 1
36 : : #define TEST_ADAPTER_ID 0
37 : : #define TEST_DMA_DEV_ID 0
38 : : #define TEST_DMA_VCHAN_ID 0
39 : : #define PACKET_LENGTH 1024
40 : : #define NB_TEST_PORTS 1
41 : : #define NB_TEST_QUEUES 2
42 : : #define NUM_CORES 2
43 : : #define DMA_OP_POOL_SIZE 128
44 : : #define TEST_MAX_OP 32
45 : : #define TEST_RINGSIZE 512
46 : :
47 : : #define MBUF_SIZE (RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
48 : :
49 : : /* Handle log statements in same manner as test macros */
50 : : #define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
51 : :
52 : : struct event_dma_adapter_test_params {
53 : : struct rte_mempool *src_mbuf_pool;
54 : : struct rte_mempool *dst_mbuf_pool;
55 : : struct rte_mempool *op_mpool;
56 : : uint8_t dma_event_port_id;
57 : : uint8_t internal_port_op_fwd;
58 : : };
59 : :
/* Template for the response event the adapter delivers once a DMA op
 * completes; its .event word is copied into each op's event_meta.
 */
struct rte_event dma_response_info = {
	.queue_id = TEST_APP_EV_QUEUE_ID,	/* completions land on the app queue */
	.sched_type = RTE_SCHED_TYPE_ATOMIC,
	.flow_id = TEST_APP_EV_FLOWID,
	.priority = TEST_APP_EV_PRIORITY,
	.op = RTE_EVENT_OP_NEW,
};
67 : :
68 : : static struct event_dma_adapter_test_params params;
69 : : static uint8_t dma_adapter_setup_done;
70 : : static uint32_t slcore_id;
71 : : static int evdev;
72 : :
/* Enqueue TEST_MAX_OP events carrying DMA ops to the adapter, then
 * dequeue the TEST_MAX_OP completion events produced for them.
 *
 * @param ev
 *   Array of TEST_MAX_OP pre-filled events (event_ptr -> struct rte_dma_op).
 * @return
 *   TEST_SUCCESS, or a test failure code from the asserts.
 */
static int
send_recv_ev(struct rte_event *ev)
{
	struct rte_event recv_ev[TEST_MAX_OP];
	uint16_t nb_enqueued = 0;
	int i = 0;

	if (params.internal_port_op_fwd) {
		/* Internal-port capable PMD: single shot through the
		 * adapter enqueue API, no retry loop.
		 */
		nb_enqueued = rte_event_dma_adapter_enqueue(evdev, TEST_APP_PORT_ID, ev,
							    TEST_MAX_OP);
	} else {
		/* SW path: retry until the whole burst is accepted. */
		while (nb_enqueued < TEST_MAX_OP) {
			nb_enqueued += rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID,
							       &ev[nb_enqueued], TEST_MAX_OP -
							       nb_enqueued);
		}
	}

	TEST_ASSERT_EQUAL(nb_enqueued, TEST_MAX_OP, "Failed to send event to dma adapter\n");

	/* Busy-poll one event at a time until every completion arrives;
	 * relies on the adapter eventually delivering all TEST_MAX_OP
	 * response events (no timeout).
	 */
	while (i < TEST_MAX_OP) {
		if (rte_event_dequeue_burst(evdev, TEST_APP_PORT_ID, &recv_ev[i], 1, 0) != 1)
			continue;
		i++;
	}

	TEST_ASSERT_EQUAL(i, TEST_MAX_OP, "Test failed. Failed to dequeue events.\n");

	return TEST_SUCCESS;
}
103 : :
104 : : static int
105 : 0 : test_dma_adapter_stats(void)
106 : : {
107 : : struct rte_event_dma_adapter_stats stats;
108 : :
109 : 0 : rte_event_dma_adapter_stats_get(TEST_ADAPTER_ID, &stats);
110 : : printf(" +------------------------------------------------------+\n");
111 : : printf(" + DMA adapter stats for instance %u:\n", TEST_ADAPTER_ID);
112 : 0 : printf(" + Event port poll count 0x%" PRIx64 "\n",
113 : : stats.event_poll_count);
114 : 0 : printf(" + Event dequeue count 0x%" PRIx64 "\n",
115 : : stats.event_deq_count);
116 : 0 : printf(" + DMA dev enqueue count 0x%" PRIx64 "\n",
117 : : stats.dma_enq_count);
118 : 0 : printf(" + DMA dev enqueue failed count 0x%" PRIx64 "\n",
119 : : stats.dma_enq_fail_count);
120 : 0 : printf(" + DMA dev dequeue count 0x%" PRIx64 "\n",
121 : : stats.dma_deq_count);
122 : 0 : printf(" + Event enqueue count 0x%" PRIx64 "\n",
123 : : stats.event_enq_count);
124 : 0 : printf(" + Event enqueue retry count 0x%" PRIx64 "\n",
125 : : stats.event_enq_retry_count);
126 : 0 : printf(" + Event enqueue fail count 0x%" PRIx64 "\n",
127 : : stats.event_enq_fail_count);
128 : : printf(" +------------------------------------------------------+\n");
129 : :
130 : 0 : rte_event_dma_adapter_stats_reset(TEST_ADAPTER_ID);
131 : 0 : return TEST_SUCCESS;
132 : : }
133 : :
134 : : static int
135 : 0 : test_dma_adapter_params(void)
136 : : {
137 : : struct rte_event_dma_adapter_runtime_params out_params;
138 : : struct rte_event_dma_adapter_runtime_params in_params;
139 : : uint32_t cap;
140 : : int err, rc;
141 : :
142 : 0 : err = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
143 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to get adapter capabilities\n");
144 : :
145 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
146 : 0 : struct rte_event event = { .queue_id = 0, };
147 : :
148 : 0 : err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
149 : : TEST_DMA_VCHAN_ID, &event);
150 : : } else
151 : 0 : err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
152 : : TEST_DMA_VCHAN_ID, NULL);
153 : :
154 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to add vchan\n");
155 : :
156 : 0 : err = rte_event_dma_adapter_runtime_params_init(&in_params);
157 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
158 : 0 : err = rte_event_dma_adapter_runtime_params_init(&out_params);
159 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
160 : :
161 : : /* Case 1: Get the default value of mbufs processed by adapter */
162 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
163 [ # # ]: 0 : if (err == -ENOTSUP) {
164 : : rc = TEST_SKIPPED;
165 : 0 : goto vchan_del;
166 : : }
167 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
168 : :
169 : : /* Case 2: Set max_nb = 32 (=BATCH_SEIZE) */
170 : 0 : in_params.max_nb = 32;
171 : :
172 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
173 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
174 : :
175 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
176 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
177 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
178 : : in_params.max_nb, out_params.max_nb);
179 : :
180 : : /* Case 3: Set max_nb = 192 */
181 : 0 : in_params.max_nb = 192;
182 : :
183 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
184 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
185 : :
186 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
187 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
188 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
189 : : in_params.max_nb, out_params.max_nb);
190 : :
191 : : /* Case 4: Set max_nb = 256 */
192 : 0 : in_params.max_nb = 256;
193 : :
194 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
195 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
196 : :
197 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
198 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
199 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
200 : : in_params.max_nb, out_params.max_nb);
201 : :
202 : : /* Case 5: Set max_nb = 30(<BATCH_SIZE) */
203 : 0 : in_params.max_nb = 30;
204 : :
205 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
206 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
207 : :
208 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
209 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
210 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
211 : : in_params.max_nb, out_params.max_nb);
212 : :
213 : : /* Case 6: Set max_nb = 512 */
214 : 0 : in_params.max_nb = 512;
215 : :
216 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
217 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
218 : :
219 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
220 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
221 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
222 : : in_params.max_nb, out_params.max_nb);
223 : :
224 : : rc = TEST_SUCCESS;
225 : 0 : vchan_del:
226 : 0 : err = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
227 : : TEST_DMA_VCHAN_ID);
228 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to delete vchan\n");
229 : :
230 : : return rc;
231 : : }
232 : :
/* FORWARD-mode datapath test: build TEST_MAX_OP copy operations
 * (src mbuf -> dst mbuf), push them through the adapter via
 * send_recv_ev() and verify each destination matches its source.
 */
static int
test_op_forward_mode(void)
{
	struct rte_mbuf *src_mbuf[TEST_MAX_OP];
	struct rte_mbuf *dst_mbuf[TEST_MAX_OP];
	struct rte_dma_op *op;
	struct rte_event ev[TEST_MAX_OP];
	int ret, i;

	ret = rte_pktmbuf_alloc_bulk(params.src_mbuf_pool, src_mbuf, TEST_MAX_OP);
	TEST_ASSERT_SUCCESS(ret, "alloc src mbufs failed.\n");

	ret = rte_pktmbuf_alloc_bulk(params.dst_mbuf_pool, dst_mbuf, TEST_MAX_OP);
	TEST_ASSERT_SUCCESS(ret, "alloc dst mbufs failed.\n");

	/* Fill each source with a single random byte value (memset
	 * truncates rte_rand() to an int) and zero the destinations.
	 */
	for (i = 0; i < TEST_MAX_OP; i++) {
		memset(rte_pktmbuf_mtod(src_mbuf[i], void *), rte_rand(), PACKET_LENGTH);
		memset(rte_pktmbuf_mtod(dst_mbuf[i], void *), 0, PACKET_LENGTH);
	}

	for (i = 0; i < TEST_MAX_OP; i++) {
		rte_mempool_get(params.op_mpool, (void **)&op);
		TEST_ASSERT_NOT_NULL(op, "Failed to allocate dma operation struct\n");

		/* Update Op: seg[0] is the source, seg[1] the destination */
		op->src_dst_seg[0].addr = rte_pktmbuf_iova(src_mbuf[i]);
		op->src_dst_seg[1].addr = rte_pktmbuf_iova(dst_mbuf[i]);
		op->src_dst_seg[0].length = PACKET_LENGTH;
		op->src_dst_seg[1].length = PACKET_LENGTH;
		op->nb_src = 1;
		op->nb_dst = 1;
		op->flags = RTE_DMA_OP_FLAG_SUBMIT;
		op->op_mp = params.op_mpool;
		op->dma_dev_id = TEST_DMA_DEV_ID;
		op->vchan = TEST_DMA_VCHAN_ID;
		/* Response event template delivered on completion. */
		op->event_meta = dma_response_info.event;

		/* Fill in event info and update event_ptr with rte_dma_op */
		memset(&ev[i], 0, sizeof(struct rte_event));
		ev[i].event = 0;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].event_type = RTE_EVENT_TYPE_DMADEV;
		/* Internal-port PMDs loop the event back to the app queue;
		 * otherwise it goes to the adapter's queue first.
		 */
		if (params.internal_port_op_fwd)
			ev[i].queue_id = TEST_APP_EV_QUEUE_ID;
		else
			ev[i].queue_id = TEST_DMA_EV_QUEUE_ID;
		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
		ev[i].flow_id = 0xAABB;
		ev[i].event_ptr = op;
	}

	ret = send_recv_ev(ev);
	TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to dma adapter\n");

	test_dma_adapter_stats();

	/* Verify every copy and return the ops to their mempool. */
	for (i = 0; i < TEST_MAX_OP; i++) {
		op = ev[i].event_ptr;
		ret = memcmp(rte_pktmbuf_mtod(src_mbuf[i], void *),
			     rte_pktmbuf_mtod(dst_mbuf[i], void *), PACKET_LENGTH);

		TEST_ASSERT_EQUAL(ret, 0, "Data mismatch for dma adapter\n");

		rte_mempool_put(op->op_mp, op);
	}

	rte_pktmbuf_free_bulk(src_mbuf, TEST_MAX_OP);
	rte_pktmbuf_free_bulk(dst_mbuf, TEST_MAX_OP);

	return TEST_SUCCESS;
}
304 : :
305 : : static int
306 : 0 : map_adapter_service_core(void)
307 : : {
308 : : uint32_t adapter_service_id;
309 : : int ret;
310 : :
311 [ # # ]: 0 : if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &adapter_service_id) == 0) {
312 : : uint32_t core_list[NUM_CORES];
313 : :
314 : 0 : ret = rte_service_lcore_list(core_list, NUM_CORES);
315 [ # # ]: 0 : TEST_ASSERT(ret >= 0, "Failed to get service core list!");
316 : :
317 [ # # ]: 0 : if (core_list[0] != slcore_id) {
318 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
319 : : "Failed to add service core");
320 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
321 : : "Failed to start service core");
322 : : }
323 : :
324 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
325 : : adapter_service_id, slcore_id, 1),
326 : : "Failed to map adapter service");
327 : : }
328 : :
329 : : return TEST_SUCCESS;
330 : : }
331 : :
332 : : static int
333 : 0 : test_with_op_forward_mode(void)
334 : : {
335 : : uint32_t cap;
336 : : int ret;
337 : :
338 : 0 : ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
339 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
340 : :
341 [ # # ]: 0 : if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
342 : : !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
343 : 0 : map_adapter_service_core();
344 : : else {
345 [ # # ]: 0 : if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
346 : : return TEST_SKIPPED;
347 : : }
348 : :
349 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_event_dma_adapter_start(TEST_ADAPTER_ID),
350 : : "Failed to start event dma adapter");
351 : :
352 : 0 : ret = test_op_forward_mode();
353 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "DMA - FORWARD mode test failed\n");
354 : : return TEST_SUCCESS;
355 : : }
356 : :
357 : : static int
358 : 0 : configure_dmadev(void)
359 : : {
360 : 0 : const struct rte_dma_conf conf = { .nb_vchans = 1};
361 : 0 : const struct rte_dma_vchan_conf qconf = {
362 : : .direction = RTE_DMA_DIR_MEM_TO_MEM,
363 : : .nb_desc = TEST_RINGSIZE,
364 : : };
365 : : struct rte_dma_info info;
366 : : unsigned int elt_size;
367 : : int ret;
368 : :
369 : 0 : ret = rte_dma_count_avail();
370 [ # # ]: 0 : RTE_TEST_ASSERT_FAIL(ret, "No dma devices found!\n");
371 : :
372 : 0 : ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
373 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Error with rte_dma_info_get()\n");
374 : :
375 [ # # ]: 0 : if (info.max_vchans < 1)
376 : 0 : RTE_LOG(ERR, USER1, "Error, no channels available on device id %u\n",
377 : : TEST_DMA_DEV_ID);
378 : :
379 [ # # ]: 0 : if (rte_dma_configure(TEST_DMA_DEV_ID, &conf) != 0)
380 : 0 : RTE_LOG(ERR, USER1, "Error with rte_dma_configure()\n");
381 : :
382 [ # # ]: 0 : if (rte_dma_vchan_setup(TEST_DMA_DEV_ID, TEST_DMA_VCHAN_ID, &qconf) < 0)
383 : 0 : RTE_LOG(ERR, USER1, "Error with vchan configuration\n");
384 : :
385 : 0 : ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
386 [ # # # # ]: 0 : if (ret != 0 || info.nb_vchans != 1)
387 : 0 : RTE_LOG(ERR, USER1, "Error, no configured vhcan reported on device id %u\n",
388 : : TEST_DMA_DEV_ID);
389 : :
390 : 0 : params.src_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_SRC_MBUFPOOL", NUM_MBUFS,
391 : : MBUF_CACHE_SIZE, 0, MBUF_SIZE,
392 : 0 : rte_socket_id());
393 [ # # ]: 0 : RTE_TEST_ASSERT_NOT_NULL(params.src_mbuf_pool, "Can't create DMA_SRC_MBUFPOOL\n");
394 : :
395 : 0 : params.dst_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_DST_MBUFPOOL", NUM_MBUFS,
396 : : MBUF_CACHE_SIZE, 0, MBUF_SIZE,
397 : 0 : rte_socket_id());
398 [ # # ]: 0 : RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, "Can't create DMA_DST_MBUFPOOL\n");
399 : :
400 : : elt_size = sizeof(struct rte_dma_op) + (sizeof(struct rte_dma_sge) * 2);
401 : 0 : params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0,
402 : 0 : 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
403 [ # # ]: 0 : RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create DMA_OP_POOL\n");
404 : :
405 : : return TEST_SUCCESS;
406 : : }
407 : :
408 : : static inline void
409 : 0 : evdev_set_conf_values(struct rte_event_dev_config *dev_conf, struct rte_event_dev_info *info)
410 : : {
411 : : memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
412 : 0 : dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
413 : 0 : dev_conf->nb_event_ports = NB_TEST_PORTS;
414 : 0 : dev_conf->nb_event_queues = NB_TEST_QUEUES;
415 : 0 : dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
416 : 0 : dev_conf->nb_event_port_dequeue_depth =
417 : 0 : info->max_event_port_dequeue_depth;
418 : 0 : dev_conf->nb_event_port_enqueue_depth =
419 : 0 : info->max_event_port_enqueue_depth;
420 : : dev_conf->nb_event_port_enqueue_depth =
421 : : info->max_event_port_enqueue_depth;
422 : 0 : dev_conf->nb_events_limit =
423 : 0 : info->max_num_events;
424 : 0 : }
425 : :
/* Create (if needed) and configure the event device with one app port
 * and two queues, then link the app port to the app queue.
 */
static int
configure_eventdev(void)
{
	struct rte_event_queue_conf queue_conf;
	struct rte_event_dev_config devconf;
	struct rte_event_dev_info info;
	uint32_t queue_count;
	uint32_t port_count;
	uint8_t qid;
	int ret;

	if (!rte_event_dev_count()) {
		/* If there is no hardware eventdev, or no software vdev was
		 * specified on the command line, create an instance of
		 * event_sw.
		 */
		LOG_DBG("Failed to find a valid event device... "
			"testing with event_sw device\n");
		TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
				    "Error creating eventdev");
		evdev = rte_event_dev_get_dev_id("event_sw0");
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");

	evdev_set_conf_values(&devconf, &info);

	ret = rte_event_dev_configure(evdev, &devconf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");

	/* Set up event queue: verify the configured count took effect. */
	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count);
	TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
	TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");

	/* App queue keeps the device's default configuration. */
	qid = TEST_APP_EV_QUEUE_ID;
	ret = rte_event_queue_setup(evdev, qid, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);

	/* Adapter queue: atomic, highest priority, single-link. */
	queue_conf.nb_atomic_flows = info.max_event_queue_flows;
	queue_conf.nb_atomic_order_sequences = 32;
	queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	qid = TEST_DMA_EV_QUEUE_ID;
	ret = rte_event_queue_setup(evdev, qid, &queue_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);

	/* Set up event port */
	ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				     &port_count);
	TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
	TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");

	ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
	TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
			    TEST_APP_PORT_ID);

	qid = TEST_APP_EV_QUEUE_ID;
	ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
	TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
		    TEST_APP_PORT_ID);

	return TEST_SUCCESS;
}
493 : :
/* Per-case teardown: release the adapter created by the case setup. */
static void
test_dma_adapter_free(void)
{
	rte_event_dma_adapter_free(TEST_ADAPTER_ID);
}
499 : :
500 : : static int
501 : 0 : test_dma_adapter_create(void)
502 : : {
503 : 0 : struct rte_event_dev_info evdev_info = {0};
504 : 0 : struct rte_event_port_conf conf = {0};
505 : : int ret;
506 : :
507 : 0 : ret = rte_event_dev_info_get(evdev, &evdev_info);
508 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
509 : :
510 : 0 : conf.new_event_threshold = evdev_info.max_num_events;
511 : 0 : conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
512 : 0 : conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;
513 : :
514 : : /* Create adapter with default port creation callback */
515 : 0 : ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, 0);
516 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
517 : :
518 : : return TEST_SUCCESS;
519 : : }
520 : :
521 : : static int
522 : 0 : test_dma_adapter_vchan_add_del(void)
523 : : {
524 : : uint32_t cap;
525 : : int ret;
526 : :
527 : 0 : ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
528 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
529 : :
530 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
531 : 0 : struct rte_event event = { .queue_id = 0, };
532 : :
533 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
534 : : TEST_DMA_VCHAN_ID, &event);
535 : : } else
536 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
537 : : TEST_DMA_VCHAN_ID, NULL);
538 : :
539 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create add vchan\n");
540 : :
541 : 0 : ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
542 : : TEST_DMA_VCHAN_ID);
543 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to delete vchan\n");
544 : :
545 : : return TEST_SUCCESS;
546 : : }
547 : :
/* Create and configure the DMA adapter in the requested mode, bind the
 * test vchan and, for the SW path, fetch the adapter's event port.
 *
 * @param mode
 *   Adapter operating mode (NEW or FORWARD).
 * @return
 *   TEST_SUCCESS, -ENOTSUP when FORWARD is requested but unsupported,
 *   or a test failure code.
 */
static int
configure_event_dma_adapter(enum rte_event_dma_adapter_mode mode)
{
	struct rte_event_dev_info evdev_info = {0};
	struct rte_event_port_conf conf = {0};
	struct rte_event event;
	uint32_t cap;
	int ret;

	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	/* Skip mode and capability mismatch check for SW eventdev */
	if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))
		goto adapter_create;

	/* FORWARD mode needs explicit internal-port support. */
	if (mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) {
		if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)
			params.internal_port_op_fwd = 1;
		else
			return -ENOTSUP;
	}

adapter_create:
	ret = rte_event_dev_info_get(evdev, &evdev_info);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");

	/* Size the adapter's port from the device maximums. */
	conf.new_event_threshold = evdev_info.max_num_events;
	conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
	conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;

	/* Create adapter with default port creation callback */
	ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, mode);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");

	/* Bind the vchan, passing the response-event template when the
	 * PMD supports per-vchan event binding.
	 */
	event.event = dma_response_info.event;
	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND)
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, &event);
	else
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, NULL);

	TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n");

	/* SW path: the caller links this port to the adapter queue. */
	if (!params.internal_port_op_fwd) {
		ret = rte_event_dma_adapter_event_port_get(TEST_ADAPTER_ID,
							   &params.dma_event_port_id);
		TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
	}

	return TEST_SUCCESS;
}
603 : :
/* Per-case teardown for the datapath test: stop services (when in
 * use), then the DMA device and the event device.
 */
static void
test_dma_adapter_stop(void)
{
	uint32_t evdev_service_id, adapter_service_id;

	/* retrieve service ids & stop services */
	if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID,
						 &adapter_service_id) == 0) {
		rte_service_runstate_set(adapter_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
		rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
	}

	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		rte_service_runstate_set(evdev_service_id, 0);
		rte_service_lcore_stop(slcore_id);
		rte_service_lcore_del(slcore_id);
		rte_dma_stop(TEST_DMA_DEV_ID);
		rte_event_dev_stop(evdev);
	} else {
		/* HW-scheduled eventdev: only the devices need stopping. */
		rte_dma_stop(TEST_DMA_DEV_ID);
		rte_event_dev_stop(evdev);
	}
}
629 : :
/* Per-case setup: configure the adapter (once), link the adapter's
 * event port for the SW path, start the eventdev service if any, and
 * start both the event and DMA devices.
 *
 * @param mode
 *   Adapter operating mode forwarded to configure_event_dma_adapter().
 */
static int
test_dma_adapter_conf(enum rte_event_dma_adapter_mode mode)
{
	uint32_t evdev_service_id;
	uint8_t qid;
	int ret;

	/* One-time adapter configuration, shared by repeated runs. */
	if (!dma_adapter_setup_done) {
		ret = configure_event_dma_adapter(mode);
		if (ret)
			return ret;
		if (!params.internal_port_op_fwd) {
			qid = TEST_DMA_EV_QUEUE_ID;
			ret = rte_event_port_link(evdev,
						  params.dma_event_port_id, &qid, NULL, 1);
			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
				    "port=%u\n", qid,
				    params.dma_event_port_id);
		}
		dma_adapter_setup_done = 1;
	}

	/* retrieve service ids */
	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		/* add a service core and start it */
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
				    "Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
				    "Failed to start service core");

		/* map services to it */
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
				    slcore_id, 1), "Failed to map evdev service");

		/* set services to running */
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
				    1), "Failed to start evdev service");
	}

	/* start the eventdev */
	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
			    "Failed to start event device");

	/* start the dma dev */
	TEST_ASSERT_SUCCESS(rte_dma_start(TEST_DMA_DEV_ID),
			    "Failed to start dma device");

	return TEST_SUCCESS;
}
679 : :
680 : : static int
681 : 0 : test_dma_adapter_conf_op_forward_mode(void)
682 : : {
683 : : enum rte_event_dma_adapter_mode mode;
684 : :
685 : : mode = RTE_EVENT_DMA_ADAPTER_OP_FORWARD;
686 : :
687 : 0 : return test_dma_adapter_conf(mode);
688 : : }
689 : :
/* Suite setup: reserve a service lcore and bring up the event and DMA
 * devices used by every test case.
 */
static int
testsuite_setup(void)
{
	int ret;

	/* First worker lcore after the main one becomes the service core. */
	slcore_id = rte_get_next_lcore(-1, 1, 0);
	TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
			      "are required to run this autotest\n");

	/* Setup and start event device. */
	ret = configure_eventdev();
	TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");

	/* Setup and start dma device. */
	ret = configure_dmadev();
	TEST_ASSERT_SUCCESS(ret, "dmadev initialization failed\n");

	return TEST_SUCCESS;
}
709 : :
/* Stop the adapter, unbind its vchan and free it. Failures are only
 * logged so teardown always runs to completion; the setup-done flag is
 * cleared so a later case reconfigures from scratch.
 */
static void
dma_adapter_teardown(void)
{
	int ret;

	ret = rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to stop adapter!");

	ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
					      TEST_DMA_VCHAN_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to delete vchan!");

	ret = rte_event_dma_adapter_free(TEST_ADAPTER_ID);
	if (ret < 0)
		RTE_LOG(ERR, USER1, "Failed to free adapter!");

	dma_adapter_setup_done = 0;
}
730 : :
/* Free the three mempools created in configure_dmadev(), logging the
 * remaining element counts (useful to spot leaked mbufs/ops).
 */
static void
dma_teardown(void)
{
	/* Free mbuf mempool */
	if (params.src_mbuf_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_SRC_MBUFPOOL count %u\n",
			rte_mempool_avail_count(params.src_mbuf_pool));
		rte_mempool_free(params.src_mbuf_pool);
		params.src_mbuf_pool = NULL;
	}

	if (params.dst_mbuf_pool != NULL) {
		RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_DST_MBUFPOOL count %u\n",
			rte_mempool_avail_count(params.dst_mbuf_pool));
		rte_mempool_free(params.dst_mbuf_pool);
		params.dst_mbuf_pool = NULL;
	}

	/* Free ops mempool */
	if (params.op_mpool != NULL) {
		RTE_LOG(DEBUG, USER1, "EVENT_DMA_OP_POOL count %u\n",
			rte_mempool_avail_count(params.op_mpool));
		rte_mempool_free(params.op_mpool);
		params.op_mpool = NULL;
	}
}
757 : :
/* Stop the event device (the vdev instance, if created, is left for
 * EAL cleanup).
 */
static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
}
763 : :
/* Suite teardown: release adapter, mempools and eventdev in order. */
static void
testsuite_teardown(void)
{
	dma_adapter_teardown();
	dma_teardown();
	eventdev_teardown();
}
771 : :
/* Test table: each entry is (setup, teardown, test). */
static struct unit_test_suite functional_testsuite = {
	.suite_name = "Event dma adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {

		/* Adapter lifecycle: create only, freed by teardown. */
		TEST_CASE_ST(NULL, test_dma_adapter_free, test_dma_adapter_create),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_vchan_add_del),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_stats),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_params),

		/* Datapath test: full FORWARD-mode configuration. */
		TEST_CASE_ST(test_dma_adapter_conf_op_forward_mode, test_dma_adapter_stop,
			     test_with_op_forward_mode),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
795 : :
/* Autotest entry point: run the functional suite. */
static int
test_event_dma_adapter(void)
{
	return unit_test_suite_runner(&functional_testsuite);
}
801 : :
802 : : #endif /* !RTE_EXEC_ENV_WINDOWS */
803 : :
/* Expose the suite as the "event_dma_adapter_autotest" command. */
REGISTER_DRIVER_TEST(event_dma_adapter_autotest, test_event_dma_adapter);
|