Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (c) 2023 Marvell.
3 : : */
4 : :
5 : : #include "test.h"
6 : : #include <string.h>
7 : : #include <rte_common.h>
8 : : #include <rte_malloc.h>
9 : : #include <rte_mempool.h>
10 : : #include <rte_mbuf.h>
11 : : #include <rte_random.h>
12 : :
13 : : #ifdef RTE_EXEC_ENV_WINDOWS
/* Windows stub: the DMA adapter is not supported there, so the whole
 * autotest reports SKIPPED instead of failing.
 */
static int
test_event_dma_adapter(void)
{
	printf("event_dma_adapter not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}
20 : :
21 : : #else
22 : :
23 : : #include <rte_bus_vdev.h>
24 : : #include <rte_dmadev.h>
25 : : #include <rte_eventdev.h>
26 : : #include <rte_event_dma_adapter.h>
27 : : #include <rte_service.h>
28 : :
29 : : #define NUM_MBUFS (8191)
30 : : #define MBUF_CACHE_SIZE (256)
31 : : #define TEST_APP_PORT_ID 0
32 : : #define TEST_APP_EV_QUEUE_ID 0
33 : : #define TEST_APP_EV_PRIORITY 0
34 : : #define TEST_APP_EV_FLOWID 0xAABB
35 : : #define TEST_DMA_EV_QUEUE_ID 1
36 : : #define TEST_ADAPTER_ID 0
37 : : #define TEST_DMA_DEV_ID 0
38 : : #define TEST_DMA_VCHAN_ID 0
39 : : #define PACKET_LENGTH 1024
40 : : #define NB_TEST_PORTS 1
41 : : #define NB_TEST_QUEUES 2
42 : : #define NUM_CORES 2
43 : : #define DMA_OP_POOL_SIZE 128
44 : : #define TEST_MAX_OP 32
45 : : #define TEST_RINGSIZE 512
46 : :
47 : : #define MBUF_SIZE (RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
48 : :
49 : : /* Handle log statements in same manner as test macros */
50 : : #define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
51 : :
/* Shared state for the DMA adapter test suite. */
struct event_dma_adapter_test_params {
	struct rte_mempool *src_mbuf_pool;	/* source payload mbufs */
	struct rte_mempool *dst_mbuf_pool;	/* destination mbufs for copies */
	struct rte_mempool *op_mpool;		/* rte_event_dma_adapter_op pool */
	uint8_t dma_event_port_id;		/* adapter event port (SW mode only) */
	uint8_t internal_port_op_fwd;		/* set when INTERNAL_PORT_OP_FWD cap present */
};
59 : :
/* Template for the completion event the adapter posts back to the
 * application queue once a DMA operation finishes.
 */
struct rte_event dma_response_info = {
	.queue_id = TEST_APP_EV_QUEUE_ID,
	.sched_type = RTE_SCHED_TYPE_ATOMIC,
	.flow_id = TEST_APP_EV_FLOWID,
	.priority = TEST_APP_EV_PRIORITY,
	.op = RTE_EVENT_OP_NEW,
};
67 : :
68 : : static struct event_dma_adapter_test_params params;
69 : : static uint8_t dma_adapter_setup_done;
70 : : static uint32_t slcore_id;
71 : : static int evdev;
72 : :
73 : : static int
74 : 0 : send_recv_ev(struct rte_event *ev)
75 : : {
76 : : struct rte_event recv_ev[TEST_MAX_OP];
77 : : uint16_t nb_enqueued = 0;
78 : : int i = 0;
79 : :
80 [ # # ]: 0 : if (params.internal_port_op_fwd) {
81 : 0 : nb_enqueued = rte_event_dma_adapter_enqueue(evdev, TEST_APP_PORT_ID, ev,
82 : : TEST_MAX_OP);
83 : : } else {
84 [ # # ]: 0 : while (nb_enqueued < TEST_MAX_OP) {
85 : 0 : nb_enqueued += rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID,
86 : 0 : &ev[nb_enqueued], TEST_MAX_OP -
87 : : nb_enqueued);
88 : : }
89 : : }
90 : :
91 [ # # ]: 0 : TEST_ASSERT_EQUAL(nb_enqueued, TEST_MAX_OP, "Failed to send event to dma adapter\n");
92 : :
93 [ # # ]: 0 : while (i < TEST_MAX_OP) {
94 [ # # ]: 0 : if (rte_event_dequeue_burst(evdev, TEST_APP_PORT_ID, &recv_ev[i], 1, 0) != 1)
95 : 0 : continue;
96 : 0 : i++;
97 : : }
98 : :
99 : : TEST_ASSERT_EQUAL(i, TEST_MAX_OP, "Test failed. Failed to dequeue events.\n");
100 : :
101 : : return TEST_SUCCESS;
102 : : }
103 : :
104 : : static int
105 : 0 : test_dma_adapter_stats(void)
106 : : {
107 : : struct rte_event_dma_adapter_stats stats;
108 : :
109 : 0 : rte_event_dma_adapter_stats_get(TEST_ADAPTER_ID, &stats);
110 : : printf(" +------------------------------------------------------+\n");
111 : : printf(" + DMA adapter stats for instance %u:\n", TEST_ADAPTER_ID);
112 : 0 : printf(" + Event port poll count 0x%" PRIx64 "\n",
113 : : stats.event_poll_count);
114 : 0 : printf(" + Event dequeue count 0x%" PRIx64 "\n",
115 : : stats.event_deq_count);
116 : 0 : printf(" + DMA dev enqueue count 0x%" PRIx64 "\n",
117 : : stats.dma_enq_count);
118 : 0 : printf(" + DMA dev enqueue failed count 0x%" PRIx64 "\n",
119 : : stats.dma_enq_fail_count);
120 : 0 : printf(" + DMA dev dequeue count 0x%" PRIx64 "\n",
121 : : stats.dma_deq_count);
122 : 0 : printf(" + Event enqueue count 0x%" PRIx64 "\n",
123 : : stats.event_enq_count);
124 : 0 : printf(" + Event enqueue retry count 0x%" PRIx64 "\n",
125 : : stats.event_enq_retry_count);
126 : 0 : printf(" + Event enqueue fail count 0x%" PRIx64 "\n",
127 : : stats.event_enq_fail_count);
128 : : printf(" +------------------------------------------------------+\n");
129 : :
130 : 0 : rte_event_dma_adapter_stats_reset(TEST_ADAPTER_ID);
131 : 0 : return TEST_SUCCESS;
132 : : }
133 : :
134 : : static int
135 : 0 : test_dma_adapter_params(void)
136 : : {
137 : : struct rte_event_dma_adapter_runtime_params out_params;
138 : : struct rte_event_dma_adapter_runtime_params in_params;
139 : : struct rte_event event;
140 : : uint32_t cap;
141 : : int err, rc;
142 : :
143 : 0 : err = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
144 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to get adapter capabilities\n");
145 : :
146 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
147 : 0 : err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
148 : : TEST_DMA_VCHAN_ID, &event);
149 : : } else
150 : 0 : err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
151 : : TEST_DMA_VCHAN_ID, NULL);
152 : :
153 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to add vchan\n");
154 : :
155 : 0 : err = rte_event_dma_adapter_runtime_params_init(&in_params);
156 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
157 : 0 : err = rte_event_dma_adapter_runtime_params_init(&out_params);
158 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
159 : :
160 : : /* Case 1: Get the default value of mbufs processed by adapter */
161 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
162 [ # # ]: 0 : if (err == -ENOTSUP) {
163 : : rc = TEST_SKIPPED;
164 : 0 : goto vchan_del;
165 : : }
166 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
167 : :
168 : : /* Case 2: Set max_nb = 32 (=BATCH_SEIZE) */
169 : 0 : in_params.max_nb = 32;
170 : :
171 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
172 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
173 : :
174 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
175 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
176 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
177 : : in_params.max_nb, out_params.max_nb);
178 : :
179 : : /* Case 3: Set max_nb = 192 */
180 : 0 : in_params.max_nb = 192;
181 : :
182 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
183 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
184 : :
185 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
186 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
187 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
188 : : in_params.max_nb, out_params.max_nb);
189 : :
190 : : /* Case 4: Set max_nb = 256 */
191 : 0 : in_params.max_nb = 256;
192 : :
193 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
194 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
195 : :
196 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
197 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
198 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
199 : : in_params.max_nb, out_params.max_nb);
200 : :
201 : : /* Case 5: Set max_nb = 30(<BATCH_SIZE) */
202 : 0 : in_params.max_nb = 30;
203 : :
204 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
205 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
206 : :
207 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
208 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
209 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
210 : : in_params.max_nb, out_params.max_nb);
211 : :
212 : : /* Case 6: Set max_nb = 512 */
213 : 0 : in_params.max_nb = 512;
214 : :
215 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
216 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
217 : :
218 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
219 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
220 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
221 : : in_params.max_nb, out_params.max_nb);
222 : :
223 : : rc = TEST_SUCCESS;
224 : 0 : vchan_del:
225 : 0 : err = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
226 : : TEST_DMA_VCHAN_ID);
227 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to delete vchan\n");
228 : :
229 : : return rc;
230 : : }
231 : :
232 : : static int
233 : 0 : test_op_forward_mode(void)
234 : : {
235 : : struct rte_mbuf *src_mbuf[TEST_MAX_OP];
236 : : struct rte_mbuf *dst_mbuf[TEST_MAX_OP];
237 : : struct rte_event_dma_adapter_op *op;
238 : : struct rte_event ev[TEST_MAX_OP];
239 : : int ret, i;
240 : :
241 : 0 : ret = rte_pktmbuf_alloc_bulk(params.src_mbuf_pool, src_mbuf, TEST_MAX_OP);
242 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "alloc src mbufs failed.\n");
243 : :
244 : 0 : ret = rte_pktmbuf_alloc_bulk(params.dst_mbuf_pool, dst_mbuf, TEST_MAX_OP);
245 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "alloc dst mbufs failed.\n");
246 : :
247 [ # # ]: 0 : for (i = 0; i < TEST_MAX_OP; i++) {
248 : 0 : memset(rte_pktmbuf_mtod(src_mbuf[i], void *), rte_rand(), PACKET_LENGTH);
249 : 0 : memset(rte_pktmbuf_mtod(dst_mbuf[i], void *), 0, PACKET_LENGTH);
250 : : }
251 : :
252 [ # # ]: 0 : for (i = 0; i < TEST_MAX_OP; i++) {
253 [ # # ]: 0 : rte_mempool_get(params.op_mpool, (void **)&op);
254 [ # # ]: 0 : TEST_ASSERT_NOT_NULL(op, "Failed to allocate dma operation struct\n");
255 : :
256 : : /* Update Op */
257 [ # # ]: 0 : op->src_dst_seg[0].addr = rte_pktmbuf_iova(src_mbuf[i]);
258 : 0 : op->src_dst_seg[1].addr = rte_pktmbuf_iova(dst_mbuf[i]);
259 : 0 : op->src_dst_seg[0].length = PACKET_LENGTH;
260 : 0 : op->src_dst_seg[1].length = PACKET_LENGTH;
261 : 0 : op->nb_src = 1;
262 : 0 : op->nb_dst = 1;
263 : 0 : op->flags = RTE_DMA_OP_FLAG_SUBMIT;
264 : 0 : op->op_mp = params.op_mpool;
265 : 0 : op->dma_dev_id = TEST_DMA_DEV_ID;
266 : 0 : op->vchan = TEST_DMA_VCHAN_ID;
267 : 0 : op->event_meta = dma_response_info.event;
268 : :
269 : : /* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
270 [ # # ]: 0 : memset(&ev[i], 0, sizeof(struct rte_event));
271 : 0 : ev[i].event = 0;
272 : 0 : ev[i].op = RTE_EVENT_OP_NEW;
273 : 0 : ev[i].event_type = RTE_EVENT_TYPE_DMADEV;
274 [ # # ]: 0 : if (params.internal_port_op_fwd)
275 : 0 : ev[i].queue_id = TEST_APP_EV_QUEUE_ID;
276 : : else
277 : 0 : ev[i].queue_id = TEST_DMA_EV_QUEUE_ID;
278 : 0 : ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
279 : 0 : ev[i].flow_id = 0xAABB;
280 : 0 : ev[i].event_ptr = op;
281 : : }
282 : :
283 : 0 : ret = send_recv_ev(ev);
284 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to dma adapter\n");
285 : :
286 : 0 : test_dma_adapter_stats();
287 : :
288 [ # # ]: 0 : for (i = 0; i < TEST_MAX_OP; i++) {
289 : 0 : op = ev[i].event_ptr;
290 : 0 : ret = memcmp(rte_pktmbuf_mtod(src_mbuf[i], void *),
291 : 0 : rte_pktmbuf_mtod(dst_mbuf[i], void *), PACKET_LENGTH);
292 : :
293 [ # # ]: 0 : TEST_ASSERT_EQUAL(ret, 0, "Data mismatch for dma adapter\n");
294 : :
295 [ # # ]: 0 : rte_mempool_put(op->op_mp, op);
296 : : }
297 : :
298 : 0 : rte_pktmbuf_free_bulk(src_mbuf, TEST_MAX_OP);
299 : 0 : rte_pktmbuf_free_bulk(dst_mbuf, TEST_MAX_OP);
300 : :
301 : 0 : return TEST_SUCCESS;
302 : : }
303 : :
304 : : static int
305 : 0 : map_adapter_service_core(void)
306 : : {
307 : : uint32_t adapter_service_id;
308 : : int ret;
309 : :
310 [ # # ]: 0 : if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &adapter_service_id) == 0) {
311 : : uint32_t core_list[NUM_CORES];
312 : :
313 : 0 : ret = rte_service_lcore_list(core_list, NUM_CORES);
314 [ # # ]: 0 : TEST_ASSERT(ret >= 0, "Failed to get service core list!");
315 : :
316 [ # # ]: 0 : if (core_list[0] != slcore_id) {
317 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
318 : : "Failed to add service core");
319 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
320 : : "Failed to start service core");
321 : : }
322 : :
323 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
324 : : adapter_service_id, slcore_id, 1),
325 : : "Failed to map adapter service");
326 : : }
327 : :
328 : : return TEST_SUCCESS;
329 : : }
330 : :
331 : : static int
332 : 0 : test_with_op_forward_mode(void)
333 : : {
334 : : uint32_t cap;
335 : : int ret;
336 : :
337 : 0 : ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
338 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
339 : :
340 [ # # ]: 0 : if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
341 : : !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
342 : 0 : map_adapter_service_core();
343 : : else {
344 [ # # ]: 0 : if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
345 : : return TEST_SKIPPED;
346 : : }
347 : :
348 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_event_dma_adapter_start(TEST_ADAPTER_ID),
349 : : "Failed to start event dma adapter");
350 : :
351 : 0 : ret = test_op_forward_mode();
352 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "DMA - FORWARD mode test failed\n");
353 : : return TEST_SUCCESS;
354 : : }
355 : :
/* Configure DMA device 0 with one MEM_TO_MEM vchan and create the three
 * mempools (src mbufs, dst mbufs, adapter ops) used by the data-path test.
 * NOTE(review): configure/vchan-setup failures are only logged, not
 * propagated; execution deliberately continues best-effort.
 */
static int
configure_dmadev(void)
{
	const struct rte_dma_conf conf = { .nb_vchans = 1};
	const struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = TEST_RINGSIZE,
	};
	struct rte_dma_info info;
	unsigned int elt_size;
	int ret;

	/* ASSERT_FAIL expects a non-zero value: at least one dmadev present. */
	ret = rte_dma_count_avail();
	RTE_TEST_ASSERT_FAIL(ret, "No dma devices found!\n");

	ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
	TEST_ASSERT_SUCCESS(ret, "Error with rte_dma_info_get()\n");

	if (info.max_vchans < 1)
		RTE_LOG(ERR, USER1, "Error, no channels available on device id %u\n",
				TEST_DMA_DEV_ID);

	if (rte_dma_configure(TEST_DMA_DEV_ID, &conf) != 0)
		RTE_LOG(ERR, USER1, "Error with rte_dma_configure()\n");

	if (rte_dma_vchan_setup(TEST_DMA_DEV_ID, TEST_DMA_VCHAN_ID, &qconf) < 0)
		RTE_LOG(ERR, USER1, "Error with vchan configuration\n");

	/* Re-read device info to confirm exactly one vchan got configured. */
	ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
	if (ret != 0 || info.nb_vchans != 1)
		RTE_LOG(ERR, USER1, "Error, no configured vhcan reported on device id %u\n",
				TEST_DMA_DEV_ID);

	params.src_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_SRC_MBUFPOOL", NUM_MBUFS,
						       MBUF_CACHE_SIZE, 0, MBUF_SIZE,
						       rte_socket_id());
	RTE_TEST_ASSERT_NOT_NULL(params.src_mbuf_pool, "Can't create DMA_SRC_MBUFPOOL\n");

	params.dst_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_DST_MBUFPOOL", NUM_MBUFS,
						       MBUF_CACHE_SIZE, 0, MBUF_SIZE,
						       rte_socket_id());
	RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, "Can't create DMA_DST_MBUFPOOL\n");

	/* Each op carries two trailing rte_dma_sge entries (src + dst). */
	elt_size = sizeof(struct rte_event_dma_adapter_op) + (sizeof(struct rte_dma_sge) * 2);
	params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0,
					     0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
	RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create DMA_OP_POOL\n");

	return TEST_SUCCESS;
}
406 : :
407 : : static inline void
408 : 0 : evdev_set_conf_values(struct rte_event_dev_config *dev_conf, struct rte_event_dev_info *info)
409 : : {
410 : : memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
411 : 0 : dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
412 : 0 : dev_conf->nb_event_ports = NB_TEST_PORTS;
413 : 0 : dev_conf->nb_event_queues = NB_TEST_QUEUES;
414 : 0 : dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
415 : 0 : dev_conf->nb_event_port_dequeue_depth =
416 : 0 : info->max_event_port_dequeue_depth;
417 : 0 : dev_conf->nb_event_port_enqueue_depth =
418 : 0 : info->max_event_port_enqueue_depth;
419 : : dev_conf->nb_event_port_enqueue_depth =
420 : : info->max_event_port_enqueue_depth;
421 : 0 : dev_conf->nb_events_limit =
422 : 0 : info->max_num_events;
423 : 0 : }
424 : :
425 : : static int
426 : 0 : configure_eventdev(void)
427 : : {
428 : : struct rte_event_queue_conf queue_conf;
429 : : struct rte_event_dev_config devconf;
430 : : struct rte_event_dev_info info;
431 : : uint32_t queue_count;
432 : : uint32_t port_count;
433 : : uint8_t qid;
434 : : int ret;
435 : :
436 [ # # ]: 0 : if (!rte_event_dev_count()) {
437 : : /* If there is no hardware eventdev, or no software vdev was
438 : : * specified on the command line, create an instance of
439 : : * event_sw.
440 : : */
441 : 0 : LOG_DBG("Failed to find a valid event device... "
442 : : "testing with event_sw device\n");
443 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
444 : : "Error creating eventdev");
445 : 0 : evdev = rte_event_dev_get_dev_id("event_sw0");
446 : : }
447 : :
448 : 0 : ret = rte_event_dev_info_get(evdev, &info);
449 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");
450 : :
451 : 0 : evdev_set_conf_values(&devconf, &info);
452 : :
453 : 0 : ret = rte_event_dev_configure(evdev, &devconf);
454 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");
455 : :
456 : : /* Set up event queue */
457 : 0 : ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count);
458 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
459 [ # # ]: 0 : TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");
460 : :
461 : 0 : qid = TEST_APP_EV_QUEUE_ID;
462 : 0 : ret = rte_event_queue_setup(evdev, qid, NULL);
463 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);
464 : :
465 : 0 : queue_conf.nb_atomic_flows = info.max_event_queue_flows;
466 : 0 : queue_conf.nb_atomic_order_sequences = 32;
467 : 0 : queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
468 : 0 : queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
469 : 0 : queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
470 : :
471 : 0 : qid = TEST_DMA_EV_QUEUE_ID;
472 : 0 : ret = rte_event_queue_setup(evdev, qid, &queue_conf);
473 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);
474 : :
475 : : /* Set up event port */
476 : 0 : ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
477 : : &port_count);
478 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
479 [ # # ]: 0 : TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");
480 : :
481 : 0 : ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
482 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
483 : : TEST_APP_PORT_ID);
484 : :
485 : 0 : qid = TEST_APP_EV_QUEUE_ID;
486 : 0 : ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
487 [ # # ]: 0 : TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
488 : : TEST_APP_PORT_ID);
489 : :
490 : : return TEST_SUCCESS;
491 : : }
492 : :
/* Teardown counterpart of test_dma_adapter_create(): release the adapter. */
static void
test_dma_adapter_free(void)
{
	rte_event_dma_adapter_free(TEST_ADAPTER_ID);
}
498 : :
499 : : static int
500 : 0 : test_dma_adapter_create(void)
501 : : {
502 : 0 : struct rte_event_dev_info evdev_info = {0};
503 : 0 : struct rte_event_port_conf conf = {0};
504 : : int ret;
505 : :
506 : 0 : ret = rte_event_dev_info_get(evdev, &evdev_info);
507 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
508 : :
509 : 0 : conf.new_event_threshold = evdev_info.max_num_events;
510 : 0 : conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
511 : 0 : conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;
512 : :
513 : : /* Create adapter with default port creation callback */
514 : 0 : ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, 0);
515 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
516 : :
517 : : return TEST_SUCCESS;
518 : : }
519 : :
520 : : static int
521 : 0 : test_dma_adapter_vchan_add_del(void)
522 : : {
523 : : struct rte_event event;
524 : : uint32_t cap;
525 : : int ret;
526 : :
527 : 0 : ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
528 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
529 : :
530 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
531 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
532 : : TEST_DMA_VCHAN_ID, &event);
533 : : } else
534 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
535 : : TEST_DMA_VCHAN_ID, NULL);
536 : :
537 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create add vchan\n");
538 : :
539 : 0 : ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
540 : : TEST_DMA_VCHAN_ID);
541 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to delete vchan\n");
542 : :
543 : : return TEST_SUCCESS;
544 : : }
545 : :
546 : : static int
547 : 0 : configure_event_dma_adapter(enum rte_event_dma_adapter_mode mode)
548 : : {
549 : 0 : struct rte_event_dev_info evdev_info = {0};
550 : 0 : struct rte_event_port_conf conf = {0};
551 : : struct rte_event event;
552 : : uint32_t cap;
553 : : int ret;
554 : :
555 : 0 : ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
556 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
557 : :
558 : : /* Skip mode and capability mismatch check for SW eventdev */
559 : 0 : if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
560 [ # # ]: 0 : !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
561 : : !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))
562 : 0 : goto adapter_create;
563 : :
564 [ # # ]: 0 : if (mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) {
565 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)
566 : 0 : params.internal_port_op_fwd = 1;
567 : : else
568 : : return -ENOTSUP;
569 : : }
570 : :
571 : 0 : adapter_create:
572 : 0 : ret = rte_event_dev_info_get(evdev, &evdev_info);
573 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
574 : :
575 : 0 : conf.new_event_threshold = evdev_info.max_num_events;
576 : 0 : conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
577 : 0 : conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;
578 : :
579 : : /* Create adapter with default port creation callback */
580 : 0 : ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, mode);
581 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
582 : :
583 : 0 : event.event = dma_response_info.event;
584 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND)
585 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
586 : : TEST_DMA_VCHAN_ID, &event);
587 : : else
588 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
589 : : TEST_DMA_VCHAN_ID, NULL);
590 : :
591 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n");
592 : :
593 [ # # ]: 0 : if (!params.internal_port_op_fwd) {
594 : 0 : ret = rte_event_dma_adapter_event_port_get(TEST_ADAPTER_ID,
595 : : ¶ms.dma_event_port_id);
596 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
597 : : }
598 : :
599 : : return TEST_SUCCESS;
600 : : }
601 : :
602 : : static void
603 : 0 : test_dma_adapter_stop(void)
604 : : {
605 : : uint32_t evdev_service_id, adapter_service_id;
606 : :
607 : : /* retrieve service ids & stop services */
608 [ # # ]: 0 : if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID,
609 : : &adapter_service_id) == 0) {
610 : 0 : rte_service_runstate_set(adapter_service_id, 0);
611 : 0 : rte_service_lcore_stop(slcore_id);
612 : 0 : rte_service_lcore_del(slcore_id);
613 : 0 : rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
614 : : }
615 : :
616 [ # # ]: 0 : if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
617 : 0 : rte_service_runstate_set(evdev_service_id, 0);
618 : 0 : rte_service_lcore_stop(slcore_id);
619 : 0 : rte_service_lcore_del(slcore_id);
620 : 0 : rte_dma_stop(TEST_DMA_DEV_ID);
621 : 0 : rte_event_dev_stop(evdev);
622 : : } else {
623 : 0 : rte_dma_stop(TEST_DMA_DEV_ID);
624 : 0 : rte_event_dev_stop(evdev);
625 : : }
626 : 0 : }
627 : :
/* One-time adapter configuration plus per-run service/device start: create
 * and link the adapter on the first call, then (every call) set up the
 * eventdev service core and start the event and DMA devices.
 */
static int
test_dma_adapter_conf(enum rte_event_dma_adapter_mode mode)
{
	uint32_t evdev_service_id;
	uint8_t qid;
	int ret;

	if (!dma_adapter_setup_done) {
		ret = configure_event_dma_adapter(mode);
		if (ret)
			return ret;
		if (!params.internal_port_op_fwd) {
			/* SW adapter: link the DMA completion queue to the
			 * adapter's dedicated event port.
			 */
			qid = TEST_DMA_EV_QUEUE_ID;
			ret = rte_event_port_link(evdev,
					params.dma_event_port_id, &qid, NULL, 1);
			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
					"port=%u\n", qid,
					params.dma_event_port_id);
		}
		dma_adapter_setup_done = 1;
	}

	/* retrieve service ids */
	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		/* add a service core and start it */
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
				"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
				"Failed to start service core");

		/* map services to it */
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
				slcore_id, 1), "Failed to map evdev service");

		/* set services to running */
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
				1), "Failed to start evdev service");
	}

	/* start the eventdev */
	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
			"Failed to start event device");

	/* start the dma dev */
	TEST_ASSERT_SUCCESS(rte_dma_start(TEST_DMA_DEV_ID),
			"Failed to start dma device");

	return TEST_SUCCESS;
}
677 : :
678 : : static int
679 : 0 : test_dma_adapter_conf_op_forward_mode(void)
680 : : {
681 : : enum rte_event_dma_adapter_mode mode;
682 : :
683 : : mode = RTE_EVENT_DMA_ADAPTER_OP_FORWARD;
684 : :
685 : 0 : return test_dma_adapter_conf(mode);
686 : : }
687 : :
688 : : static int
689 : 0 : testsuite_setup(void)
690 : : {
691 : : int ret;
692 : :
693 : 0 : slcore_id = rte_get_next_lcore(-1, 1, 0);
694 [ # # ]: 0 : TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
695 : : "are required to run this autotest\n");
696 : :
697 : : /* Setup and start event device. */
698 : 0 : ret = configure_eventdev();
699 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");
700 : :
701 : : /* Setup and start dma device. */
702 : 0 : ret = configure_dmadev();
703 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "dmadev initialization failed\n");
704 : :
705 : : return TEST_SUCCESS;
706 : : }
707 : :
708 : : static void
709 : 0 : dma_adapter_teardown(void)
710 : : {
711 : : int ret;
712 : :
713 : 0 : ret = rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
714 [ # # ]: 0 : if (ret < 0)
715 : 0 : RTE_LOG(ERR, USER1, "Failed to stop adapter!");
716 : :
717 : 0 : ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
718 : : TEST_DMA_VCHAN_ID);
719 [ # # ]: 0 : if (ret < 0)
720 : 0 : RTE_LOG(ERR, USER1, "Failed to delete vchan!");
721 : :
722 : 0 : ret = rte_event_dma_adapter_free(TEST_ADAPTER_ID);
723 [ # # ]: 0 : if (ret < 0)
724 : 0 : RTE_LOG(ERR, USER1, "Failed to free adapter!");
725 : :
726 : 0 : dma_adapter_setup_done = 0;
727 : 0 : }
728 : :
729 : : static void
730 : 0 : dma_teardown(void)
731 : : {
732 : : /* Free mbuf mempool */
733 [ # # ]: 0 : if (params.src_mbuf_pool != NULL) {
734 : 0 : RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_SRC_MBUFPOOL count %u\n",
735 : : rte_mempool_avail_count(params.src_mbuf_pool));
736 : 0 : rte_mempool_free(params.src_mbuf_pool);
737 : 0 : params.src_mbuf_pool = NULL;
738 : : }
739 : :
740 [ # # ]: 0 : if (params.dst_mbuf_pool != NULL) {
741 : 0 : RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_DST_MBUFPOOL count %u\n",
742 : : rte_mempool_avail_count(params.dst_mbuf_pool));
743 : 0 : rte_mempool_free(params.dst_mbuf_pool);
744 : 0 : params.dst_mbuf_pool = NULL;
745 : : }
746 : :
747 : : /* Free ops mempool */
748 [ # # ]: 0 : if (params.op_mpool != NULL) {
749 : 0 : RTE_LOG(DEBUG, USER1, "EVENT_DMA_OP_POOL count %u\n",
750 : : rte_mempool_avail_count(params.op_mpool));
751 : 0 : rte_mempool_free(params.op_mpool);
752 : 0 : params.op_mpool = NULL;
753 : : }
754 : 0 : }
755 : :
/* Stop the event device used by the suite. */
static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
}
761 : :
/* Suite teardown: adapter first, then mempools, then the event device. */
static void
testsuite_teardown(void)
{
	dma_adapter_teardown();
	dma_teardown();
	eventdev_teardown();
}
769 : :
/* Test-case table: each TEST_CASE_ST is (setup, teardown, test). */
static struct unit_test_suite functional_testsuite = {
	.suite_name = "Event dma adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {

		/* create is itself the test here; free runs as teardown */
		TEST_CASE_ST(NULL, test_dma_adapter_free, test_dma_adapter_create),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
				test_dma_adapter_vchan_add_del),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
				test_dma_adapter_stats),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
				test_dma_adapter_params),

		/* full data-path test in OP_FORWARD mode */
		TEST_CASE_ST(test_dma_adapter_conf_op_forward_mode, test_dma_adapter_stop,
				test_with_op_forward_mode),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
793 : :
/* Entry point registered with the test framework (non-Windows build). */
static int
test_event_dma_adapter(void)
{
	return unit_test_suite_runner(&functional_testsuite);
}
799 : :
800 : : #endif /* !RTE_EXEC_ENV_WINDOWS */
801 : :
802 : 251 : REGISTER_DRIVER_TEST(event_dma_adapter_autotest, test_event_dma_adapter);
|