/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>

#include <rte_dmadev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_dmadev_pmd.h>

#include "test.h"
#include "test_dmadev_api.h"

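/*
 * Functional self-test for the dmadev library. For each DMA device
 * present, a unit-test suite is run covering: single and burst copies,
 * scatter-gather copies, device stop/start handling, burst-capacity
 * accounting, completion/error-status handling, fill operations and,
 * optionally, mem-to-dev copies with automatic source-buffer freeing.
 */
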
#define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)

#define TEST_NAME_MAX_LEN 80
#define TEST_RINGSIZE 512
#define COPY_LEN 2048

static struct rte_dma_info info;
static struct rte_mempool *pool;
static bool check_err_stats;
static int16_t test_dev_id;
static uint16_t id_count;
static uint16_t vchan;

enum {
        TEST_PARAM_REMOTE_ADDR = 0,
        TEST_PARAM_MAX,
};

static const char * const dma_test_param[] = {
        [TEST_PARAM_REMOTE_ADDR] = "remote_addr",
};

static uint64_t env_test_param[TEST_PARAM_MAX];

enum {
        TEST_M2D_AUTO_FREE = 0,
        TEST_MAX,
};

struct dma_add_test {
        const char *name;
        bool enabled;
};

struct dma_add_test dma_add_test[] = {
        [TEST_M2D_AUTO_FREE] = {.name = "m2d_auto_free", .enabled = false},
};

static void
__rte_format_printf(3, 4)
print_err(const char *func, int lineno, const char *format, ...)
{
        va_list ap;

        fprintf(stderr, "In %s:%d - ", func, lineno);
        va_start(ap, format);
        vfprintf(stderr, format, ap);
        va_end(ap);
}

struct runtest_param {
        const char name[TEST_NAME_MAX_LEN];
        int (*test_fn)(int16_t dev_id, uint16_t vchan);
        int iterations;
};

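/*
 * Generic test runner: calls param->test_fn against the global test
 * device param->iterations times, and after each pass checks via the
 * device stats that every submitted job was reported completed and,
 * unless the test expects errors, that none failed.
 */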
static int
runtest(const void *args)
{
        int (*test_fn)(int16_t dev_id, uint16_t vchan);
        const struct runtest_param *param = args;
        struct rte_dma_stats stats;
        const char *printable;
        int iterations;
        int16_t dev_id;
        int i;

        printable = param->name;
        iterations = param->iterations;
        test_fn = param->test_fn;
        dev_id = test_dev_id;

        rte_dma_stats_reset(dev_id, vchan);
        printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
                        check_err_stats ? " " : "(errors expected)");
        for (i = 0; i < iterations; i++) {
                if (test_fn(dev_id, vchan) < 0)
                        return -1;

                rte_dma_stats_get(dev_id, 0, &stats);
                printf("Ops submitted: %"PRIu64"\t", stats.submitted);
                printf("Ops completed: %"PRIu64"\t", stats.completed);
                printf("Errors: %"PRIu64"\r", stats.errors);

                if (stats.completed != stats.submitted)
                        ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
                if (check_err_stats && stats.errors != 0)
                        ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
        }
        printf("\n");
        return 0;
}

static void
await_hw(int16_t dev_id, uint16_t vchan)
{
        enum rte_dma_vchan_status st;

        if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
                /* for drivers that don't support this op, just sleep for 1 millisecond */
                rte_delay_us_sleep(1000);
                return;
        }

        /* for those that do, *max* end time is one second from now, but all should be faster */
        const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
        while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
                rte_pause();
                rte_dma_vchan_status(dev_id, vchan, &st);
        }
}
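/*
 * Note: rte_dma_vchan_status() is an optional driver op, hence the
 * 1 ms sleep fallback above. The one-second cap merely bounds the
 * polling loop on misbehaving hardware; completions are expected well
 * before it expires.
 */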

/* run a series of copy tests just using some different options for enqueues and completions */
static int
do_multi_copies(int16_t dev_id, uint16_t vchan,
                int split_batches,        /* submit 2 x 16 or 1 x 32 burst */
                int split_completions,    /* gather 2 x 16 or 1 x 32 completions */
                int use_completed_status) /* use completed or completed_status function */
{
        struct rte_mbuf *srcs[32], *dsts[32];
        enum rte_dma_status_code sc[32];
        unsigned int i, j;
        bool dma_err = false;

        /* Enqueue burst of copies and hit doorbell */
        for (i = 0; i < RTE_DIM(srcs); i++) {
                uint64_t *src_data;

                if (split_batches && i == RTE_DIM(srcs) / 2)
                        rte_dma_submit(dev_id, vchan);

                srcs[i] = rte_pktmbuf_alloc(pool);
                dsts[i] = rte_pktmbuf_alloc(pool);
                if (srcs[i] == NULL || dsts[i] == NULL)
                        ERR_RETURN("Error allocating buffers\n");

                src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
                for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
                        src_data[j] = rte_rand();

                if (rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(srcs[i]),
                                rte_mbuf_data_iova(dsts[i]), COPY_LEN, 0) != id_count++)
                        ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
        }
        rte_dma_submit(dev_id, vchan);

        await_hw(dev_id, vchan);

        if (split_completions) {
                /* gather completions in two halves */
                uint16_t half_len = RTE_DIM(srcs) / 2;
                int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
                if (ret != half_len || dma_err)
                        ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
                                        ret, half_len, dma_err);

                ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
                if (ret != half_len || dma_err)
                        ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
                                        ret, half_len, dma_err);
        } else {
                /* gather all completions in one go, using either
                 * completed or completed_status fns
                 */
                if (!use_completed_status) {
                        int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
                        if (n != RTE_DIM(srcs) || dma_err)
                                ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n",
                                                n, RTE_DIM(srcs), dma_err);
                } else {
                        int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
                        if (n != RTE_DIM(srcs))
                                ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n",
                                                n, RTE_DIM(srcs));

                        for (j = 0; j < (uint16_t)n; j++)
                                if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
                                        ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
                                                        j, sc[j]);
                }
        }

        /* check for empty */
        int ret = use_completed_status ?
                        rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
                        rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
        if (ret != 0)
                ERR_RETURN("Error with completion check - ops unexpectedly returned\n");

        for (i = 0; i < RTE_DIM(srcs); i++) {
                char *src_data, *dst_data;

                src_data = rte_pktmbuf_mtod(srcs[i], char *);
                dst_data = rte_pktmbuf_mtod(dsts[i], char *);
                for (j = 0; j < COPY_LEN; j++)
                        if (src_data[j] != dst_data[j])
                                ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);

                rte_pktmbuf_free(srcs[i]);
                rte_pktmbuf_free(dsts[i]);
        }
        return 0;
}

static int
test_single_copy(int16_t dev_id, uint16_t vchan)
{
        uint16_t i;
        uint16_t id;
        enum rte_dma_status_code status;
        struct rte_mbuf *src, *dst;
        char *src_data, *dst_data;

        src = rte_pktmbuf_alloc(pool);
        dst = rte_pktmbuf_alloc(pool);
        src_data = rte_pktmbuf_mtod(src, char *);
        dst_data = rte_pktmbuf_mtod(dst, char *);

        for (i = 0; i < COPY_LEN; i++)
                src_data[i] = rte_rand() & 0xFF;

        id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
                        COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
        if (id != id_count)
                ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
                                id, id_count);

        /* give time for copy to finish, then check it was done */
        await_hw(dev_id, vchan);

        for (i = 0; i < COPY_LEN; i++)
                if (dst_data[i] != src_data[i])
                        ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
                                        dst_data[i], src_data[i]);

        /* now check completion works */
        id = ~id;
        if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
                ERR_RETURN("Error with rte_dma_completed\n");

        if (id != id_count)
                ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
                                id, id_count);

        /* check for completed and id when no job done */
        id = ~id;
        if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
                ERR_RETURN("Error with rte_dma_completed when no job done\n");
        if (id != id_count)
                ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
                                id, id_count);

        /* check for completed_status and id when no job done */
        id = ~id;
        if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
                ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
        if (id != id_count)
                ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
                                id, id_count);

        rte_pktmbuf_free(src);
        rte_pktmbuf_free(dst);

        /* now check completion returns nothing more */
        if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
                ERR_RETURN("Error with rte_dma_completed in empty check\n");

        id_count++;

        return 0;
}
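/*
 * The function above exercises the canonical dmadev data-path pattern
 * that the rest of this file builds on:
 *
 *	id = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
 *			RTE_DMA_OP_FLAG_SUBMIT);  // enqueue + ring doorbell
 *	await_hw(dev_id, vchan);                  // give hardware time
 *	n = rte_dma_completed(dev_id, vchan, 1, &id, NULL);
 *
 * Job ids are the 16-bit ring indices returned by the enqueue call; on
 * return from rte_dma_completed(), "id" holds the index of the last
 * completed job, even when no new completions are reported.
 */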

static int
test_enqueue_copies(int16_t dev_id, uint16_t vchan)
{
        unsigned int i;

        /* test doing a single copy */
        if (test_single_copy(dev_id, vchan) < 0)
                return -1;

        /* test doing multiple single copies */
        do {
                uint16_t id;
                const uint16_t max_ops = 4;
                struct rte_mbuf *src, *dst;
                char *src_data, *dst_data;
                uint16_t count;

                src = rte_pktmbuf_alloc(pool);
                dst = rte_pktmbuf_alloc(pool);
                src_data = rte_pktmbuf_mtod(src, char *);
                dst_data = rte_pktmbuf_mtod(dst, char *);

                for (i = 0; i < COPY_LEN; i++)
                        src_data[i] = rte_rand() & 0xFF;

                /* perform the same copy <max_ops> times */
                for (i = 0; i < max_ops; i++)
                        if (rte_dma_copy(dev_id, vchan,
                                        rte_pktmbuf_iova(src),
                                        rte_pktmbuf_iova(dst),
                                        COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
                                ERR_RETURN("Error with rte_dma_copy\n");

                await_hw(dev_id, vchan);

                count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
                if (count != max_ops)
                        ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
                                        count, max_ops);

                if (id != id_count - 1)
                        ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
                                        id, id_count - 1);

                for (i = 0; i < COPY_LEN; i++)
                        if (dst_data[i] != src_data[i])
                                ERR_RETURN("Data mismatch at char %u\n", i);

                rte_pktmbuf_free(src);
                rte_pktmbuf_free(dst);
        } while (0);

        /* test doing multiple copies */
        return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
                        /* enqueue 2 batches and then complete both */
                        || do_multi_copies(dev_id, vchan, 1, 0, 0)
                        /* enqueue 1 batch, then complete in two halves */
                        || do_multi_copies(dev_id, vchan, 0, 1, 0)
                        /* test using completed_status in place of regular completed API */
                        || do_multi_copies(dev_id, vchan, 0, 0, 1);
}

static int
test_stop_start(int16_t dev_id, uint16_t vchan)
{
        /* device is already started on input, should be (re)started on output */

        uint16_t id = 0;
        enum rte_dma_status_code status = RTE_DMA_STATUS_SUCCESSFUL;

        /* - test that stopping a device works ok,
         * - then do a start-stop without doing a copy
         * - finally restart the device
         * checking for errors at each stage, and validating we can still copy at the end.
         */
        if (rte_dma_stop(dev_id) < 0)
                ERR_RETURN("Error stopping device\n");

        if (rte_dma_start(dev_id) < 0)
                ERR_RETURN("Error restarting device\n");
        if (rte_dma_stop(dev_id) < 0)
                ERR_RETURN("Error stopping device after restart (no jobs executed)\n");

        if (rte_dma_start(dev_id) < 0)
                ERR_RETURN("Error restarting device after multiple stop-starts\n");

        /* before doing a copy, we need to know what the next id will be. It should
         * either be:
         * - that of the last job completed before the stop, if the driver does not
         *   reset ids on stop
         * - or -1, i.e. next job is 0, if the driver does reset the job ids on stop
         */
        if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
                ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
        id += 1; /* id_count is next job id */
        if (id != id_count && id != 0)
                ERR_RETURN("Unexpected next id from device after stop-start. Got %u, expected %u or 0\n",
                                id, id_count);

        id_count = id;
        if (test_single_copy(dev_id, vchan) < 0)
                ERR_RETURN("Error performing copy after device restart\n");
        return 0;
}
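/*
 * Note: the dmadev API does not specify whether job ids survive a
 * stop/start cycle, so the check above accepts both behaviours: a
 * driver may carry on numbering from the last completed job or may
 * restart ids from zero.
 */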

static int
test_enqueue_sg_copies(int16_t dev_id, uint16_t vchan)
{
        unsigned int src_len, dst_len, n_sge, len, i, j, k;
        char orig_src[COPY_LEN], orig_dst[COPY_LEN];
        struct rte_dma_info info = { 0 };
        enum rte_dma_status_code status;
        uint16_t id, n_src, n_dst;

        if (rte_dma_info_get(dev_id, &info) < 0)
                ERR_RETURN("Failed to get dev info\n");

        if (info.max_sges < 2)
                ERR_RETURN("Test needs a minimum of 2 SG pointers\n");

        n_sge = info.max_sges;

        for (n_src = 1; n_src <= n_sge; n_src++) {
                for (n_dst = 1; n_dst <= n_sge; n_dst++) {
                        /* Normalize SG buffer lengths */
                        len = COPY_LEN;
                        len -= (len % (n_src * n_dst));
                        dst_len = len / n_dst;
                        src_len = len / n_src;

                        struct rte_dma_sge sg_src[n_sge], sg_dst[n_sge];
                        struct rte_mbuf *src[n_sge], *dst[n_sge];
                        char *src_data[n_sge], *dst_data[n_sge];

                        for (i = 0; i < len; i++)
                                orig_src[i] = rte_rand() & 0xFF;

                        memset(orig_dst, 0, len);

                        for (i = 0; i < n_src; i++) {
                                src[i] = rte_pktmbuf_alloc(pool);
                                RTE_ASSERT(src[i] != NULL);
                                sg_src[i].addr = rte_pktmbuf_iova(src[i]);
                                sg_src[i].length = src_len;
                                src_data[i] = rte_pktmbuf_mtod(src[i], char *);
                        }

                        for (k = 0; k < n_dst; k++) {
                                dst[k] = rte_pktmbuf_alloc(pool);
                                RTE_ASSERT(dst[k] != NULL);
                                sg_dst[k].addr = rte_pktmbuf_iova(dst[k]);
                                sg_dst[k].length = dst_len;
                                dst_data[k] = rte_pktmbuf_mtod(dst[k], char *);
                        }

                        for (i = 0; i < n_src; i++) {
                                for (j = 0; j < src_len; j++)
                                        src_data[i][j] = orig_src[i * src_len + j];
                        }

                        for (k = 0; k < n_dst; k++)
                                memset(dst_data[k], 0, dst_len);

                        printf("\tsrc segs: %2d [seg len: %4d] - dst segs: %2d [seg len: %4d]\n",
                                        n_src, src_len, n_dst, dst_len);

                        id = rte_dma_copy_sg(dev_id, vchan, sg_src, sg_dst, n_src, n_dst,
                                        RTE_DMA_OP_FLAG_SUBMIT);

                        if (id != id_count)
                                ERR_RETURN("Error with rte_dma_copy_sg, got %u, expected %u\n",
                                                id, id_count);

                        /* Give time for copy to finish, then check it was done */
                        await_hw(dev_id, vchan);

                        for (k = 0; k < n_dst; k++)
                                memcpy((&orig_dst[0] + k * dst_len), dst_data[k], dst_len);

                        if (memcmp(orig_src, orig_dst, COPY_LEN))
                                ERR_RETURN("Data mismatch\n");

                        /* Verify completion */
                        id = ~id;
                        if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
                                ERR_RETURN("Error with rte_dma_completed\n");

                        /* Verify expected index (id_count) */
                        if (id != id_count)
                                ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
                                                id, id_count);

                        /* Check for completed and id when no job done */
                        id = ~id;
                        if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
                                ERR_RETURN("Error with rte_dma_completed when no job done\n");

                        if (id != id_count)
                                ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
                                                id, id_count);

                        /* Check for completed_status and id when no job done */
                        id = ~id;
                        if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
                                ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
                        if (id != id_count)
                                ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
                                                id, id_count);

                        for (i = 0; i < n_src; i++)
                                rte_pktmbuf_free(src[i]);
                        for (i = 0; i < n_dst; i++)
                                rte_pktmbuf_free(dst[i]);

                        /* Verify that completion returns nothing more */
                        if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
                                ERR_RETURN("Error with rte_dma_completed in empty check\n");

                        id_count++;
                }
        }
        return 0;
}

/* Failure handling test cases - global macros and variables for those tests */
#define COMP_BURST_SZ 16
#define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)
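/*
 * OPT_FENCE puts RTE_DMA_OP_FLAG_FENCE on the 9th op of a burst when
 * fencing is requested; a fenced op may not be processed until all
 * previously-enqueued ops have completed. After a deliberately failed
 * op, later ops may therefore legitimately complete successfully or be
 * skipped (RTE_DMA_STATUS_NOT_ATTEMPTED), depending on fencing and
 * device ordering.
 */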

static int
test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence,
                struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
        /* Test single full batch statuses with failures */
        enum rte_dma_status_code status[COMP_BURST_SZ];
        struct rte_dma_stats baseline, stats;
        uint16_t invalid_addr_id = 0;
        uint16_t idx;
        uint16_t count, status_count;
        unsigned int i;
        bool error = false;
        int err_count = 0;

        rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */
        for (i = 0; i < COMP_BURST_SZ; i++) {
                int id = rte_dma_copy(dev_id, vchan,
                                (i == fail_idx ? 0 : rte_mbuf_data_iova(srcs[i])),
                                rte_mbuf_data_iova(dsts[i]), COPY_LEN, OPT_FENCE(i));
                if (id < 0)
                        ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
                if (i == fail_idx)
                        invalid_addr_id = id;
        }
        rte_dma_submit(dev_id, vchan);
        rte_dma_stats_get(dev_id, vchan, &stats);
        if (stats.submitted != baseline.submitted + COMP_BURST_SZ)
                ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n",
                                stats.submitted, baseline.submitted + COMP_BURST_SZ);

        await_hw(dev_id, vchan);

        count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
        if (count != fail_idx)
                ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
                                count, fail_idx);
        if (!error)
                ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
                                fail_idx);
        if (idx != invalid_addr_id - 1)
                ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
                                fail_idx, idx, invalid_addr_id - 1);

        /* all checks ok, now verify calling completed() again always returns 0 */
        for (i = 0; i < 10; i++)
                if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0
                                || error == false || idx != (invalid_addr_id - 1))
                        ERR_RETURN("Error with follow-up completed calls for fail idx %u\n",
                                        fail_idx);

        status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ,
                        &idx, status);
        /* Some HW may stop on error and only be restarted after the error status
         * has been retrieved for a single value. To handle this case, if we get
         * just one status back, wait for more completions and get the status for
         * the rest of the burst.
         */
        if (status_count == 1) {
                await_hw(dev_id, vchan);
                status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1,
                                &idx, &status[1]);
        }
        /* check that at this point we have all status values */
        if (status_count != COMP_BURST_SZ - count)
                ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
                                fail_idx, status_count, COMP_BURST_SZ - count);
        /* now verify just one failure followed by multiple successful or skipped entries */
        if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
                ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
                                fail_idx);
        for (i = 1; i < status_count; i++)
                /* after a failure in a burst, depending on ordering/fencing,
                 * operations may be successful or skipped because of previous error.
                 */
                if (status[i] != RTE_DMA_STATUS_SUCCESSFUL
                                && status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED)
                        ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n",
                                        fail_idx, count + i, COMP_BURST_SZ);

        /* check the completed + errors stats are as expected */
        rte_dma_stats_get(dev_id, vchan, &stats);
        if (stats.completed != baseline.completed + COMP_BURST_SZ)
                ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n",
                                stats.completed, baseline.completed + COMP_BURST_SZ);
        for (i = 0; i < status_count; i++)
                err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL);
        if (stats.errors != baseline.errors + err_count)
                ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n",
                                stats.errors, baseline.errors + err_count);

        return 0;
}
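/*
 * The completion contract exercised above: rte_dma_completed() reports
 * jobs only up to, not including, the first failure and sets has_error;
 * thereafter it returns 0 until the failure is drained with
 * rte_dma_completed_status(), which yields a per-job status code for
 * the failed op and those following it.
 */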

static int
test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence,
                struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
        /* Test gathering batch statuses one at a time */
        enum rte_dma_status_code status[COMP_BURST_SZ];
        uint16_t invalid_addr_id = 0;
        uint16_t idx;
        uint16_t count = 0, status_count = 0;
        unsigned int j;
        bool error = false;

        for (j = 0; j < COMP_BURST_SZ; j++) {
                int id = rte_dma_copy(dev_id, vchan,
                                (j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
                                rte_mbuf_data_iova(dsts[j]), COPY_LEN, OPT_FENCE(j));
                if (id < 0)
                        ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
                if (j == fail_idx)
                        invalid_addr_id = id;
        }
        rte_dma_submit(dev_id, vchan);
        await_hw(dev_id, vchan);

        /* use regular "completed" until we hit the error */
        while (!error) {
                uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error);
                count += n;
                if (n > 1 || count >= COMP_BURST_SZ)
                        ERR_RETURN("Error - too many completions received\n");
                if (n == 0 && !error)
                        ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n",
                                        count);
        }
        if (idx != invalid_addr_id - 1)
                ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n",
                                idx, invalid_addr_id - 1);

        /* use completed_status until we hit end of burst */
        while (count + status_count < COMP_BURST_SZ) {
                uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx,
                                &status[status_count]);
                await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */
                status_count += n;
                if (n != 1)
                        ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n",
                                        n);
        }

        /* check for single failure */
        if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
                ERR_RETURN("Error, unexpected successful DMA transaction\n");
        for (j = 1; j < status_count; j++)
                if (status[j] != RTE_DMA_STATUS_SUCCESSFUL
                                && status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED)
                        ERR_RETURN("Error, unexpected DMA error reported\n");

        return 0;
}

static int
test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan,
                struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
        /* When an error occurs, just collect a single error using "completed_status()"
         * before going back to completed() calls
         */
        enum rte_dma_status_code status;
        uint16_t invalid_addr_id = 0;
        uint16_t idx;
        uint16_t count, status_count, count2;
        unsigned int j;
        bool error = false;

        for (j = 0; j < COMP_BURST_SZ; j++) {
                int id = rte_dma_copy(dev_id, vchan,
                                (j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
                                rte_mbuf_data_iova(dsts[j]), COPY_LEN, 0);
                if (id < 0)
                        ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
                if (j == fail_idx)
                        invalid_addr_id = id;
        }
        rte_dma_submit(dev_id, vchan);
        await_hw(dev_id, vchan);

        /* get up to the error point */
        count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
        if (count != fail_idx)
                ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
                                count, fail_idx);
        if (!error)
                ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
                                fail_idx);
        if (idx != invalid_addr_id - 1)
                ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
                                fail_idx, idx, invalid_addr_id - 1);

        /* get the error code */
        status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status);
        if (status_count != 1)
                ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
                                fail_idx, status_count, 1);
        if (status == RTE_DMA_STATUS_SUCCESSFUL)
                ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
                                fail_idx);

        /* delay in case time is needed after the error is handled to complete other jobs */
        await_hw(dev_id, vchan);

        /* get the rest of the completions without status */
        count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
        if (error == true)
                ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n",
                                fail_idx);
        if (count + status_count + count2 != COMP_BURST_SZ)
                ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n",
                                count + status_count + count2, COMP_BURST_SZ);

        return 0;
}

static int
test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
                const unsigned int *fail, size_t num_fail)
{
        /* test having multiple errors in one go */
        enum rte_dma_status_code status[COMP_BURST_SZ];
        unsigned int i, j;
        uint16_t count, err_count = 0;
        bool error = false;

        /* enqueue and gather completions in one go */
        for (j = 0; j < COMP_BURST_SZ; j++) {
                uintptr_t src = rte_mbuf_data_iova(srcs[j]);
                /* set up for failure if the current index is anywhere in the fails array */
                for (i = 0; i < num_fail; i++)
                        if (j == fail[i])
                                src = 0;

                int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
                                COPY_LEN, 0);
                if (id < 0)
                        ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
        }
        rte_dma_submit(dev_id, vchan);
        await_hw(dev_id, vchan);

        count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status);
        while (count < COMP_BURST_SZ) {
                await_hw(dev_id, vchan);

                uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count,
                                NULL, &status[count]);
                if (ret == 0)
                        ERR_RETURN("Error getting all completions for jobs. Got %u of %u\n",
                                        count, COMP_BURST_SZ);
                count += ret;
        }
        for (i = 0; i < count; i++)
                if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
                        err_count++;

        if (err_count != num_fail)
                ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n",
                                err_count, num_fail);

        /* enqueue and gather completions in bursts, but getting errors one at a time */
        for (j = 0; j < COMP_BURST_SZ; j++) {
                uintptr_t src = rte_mbuf_data_iova(srcs[j]);
                /* set up for failure if the current index is anywhere in the fails array */
                for (i = 0; i < num_fail; i++)
                        if (j == fail[i])
                                src = 0;

                int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
                                COPY_LEN, 0);
                if (id < 0)
                        ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
        }
        rte_dma_submit(dev_id, vchan);
        await_hw(dev_id, vchan);

        count = 0;
        err_count = 0;
        while (count + err_count < COMP_BURST_SZ) {
                count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error);
                if (error) {
                        uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1,
                                        NULL, status);
                        if (ret != 1)
                                ERR_RETURN("Error getting error-status for completions\n");
                        err_count += ret;
                        await_hw(dev_id, vchan);
                }
        }
        if (err_count != num_fail)
                ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n",
                                err_count, num_fail);

        return 0;
}

static int
test_completion_status(int16_t dev_id, uint16_t vchan, bool fence)
{
        const unsigned int fail[] = {0, 7, 14, 15};
        struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
        unsigned int i;

        for (i = 0; i < COMP_BURST_SZ; i++) {
                srcs[i] = rte_pktmbuf_alloc(pool);
                dsts[i] = rte_pktmbuf_alloc(pool);
        }

        for (i = 0; i < RTE_DIM(fail); i++) {
                if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0)
                        return -1;

                if (test_individual_status_query_with_failure(dev_id, vchan, fence,
                                srcs, dsts, fail[i]) < 0)
                        return -1;

                /* this test runs the same fenced or unfenced, but there is no harm
                 * in running it twice
                 */
                if (test_single_item_status_query_with_failure(dev_id, vchan,
                                srcs, dsts, fail[i]) < 0)
                        return -1;
        }

        if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0)
                return -1;

        for (i = 0; i < COMP_BURST_SZ; i++) {
                rte_pktmbuf_free(srcs[i]);
                rte_pktmbuf_free(dsts[i]);
        }
        return 0;
}

static int
test_completion_handling(int16_t dev_id, uint16_t vchan)
{
        return test_completion_status(dev_id, vchan, false) /* without fences */
                        || test_completion_status(dev_id, vchan, true); /* with fences */
}

static int
test_enqueue_fill(int16_t dev_id, uint16_t vchan)
{
        const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
        struct rte_mbuf *dst;
        char *dst_data;
        uint64_t pattern = 0xfedcba9876543210;
        unsigned int i, j;

        dst = rte_pktmbuf_alloc(pool);
        if (dst == NULL)
                ERR_RETURN("Failed to allocate mbuf\n");
        dst_data = rte_pktmbuf_mtod(dst, char *);

        for (i = 0; i < RTE_DIM(lengths); i++) {
                /* reset dst_data */
                memset(dst_data, 0, rte_pktmbuf_data_len(dst));

                /* perform the fill operation */
                int id = rte_dma_fill(dev_id, vchan, pattern,
                                rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
                if (id < 0)
                        ERR_RETURN("Error with rte_dma_fill\n");
                await_hw(dev_id, vchan);

                if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
                        ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
                /* check the data from the fill operation is correct */
                for (j = 0; j < lengths[i]; j++) {
                        char pat_byte = ((char *)&pattern)[j % 8];
                        if (dst_data[j] != pat_byte)
                                ERR_RETURN("Error with fill operation (length = %u): got (%x), not (%x)\n",
                                                lengths[i], dst_data[j], pat_byte);
                }
                /* check that the data after the fill operation was not written to */
                for (; j < rte_pktmbuf_data_len(dst); j++)
                        if (dst_data[j] != 0)
                                ERR_RETURN("Error, fill operation wrote too far (length = %u): got (%x), not (%x)\n",
                                                lengths[i], dst_data[j], 0);
        }

        rte_pktmbuf_free(dst);
        return 0;
}
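/*
 * Note: the expected byte at offset j is ((char *)&pattern)[j % 8],
 * i.e. the pattern's own in-memory byte order, which makes the check
 * endianness-agnostic. The trailing-zeros scan then catches devices
 * that round the fill length up past the requested (possibly
 * unaligned) size.
 */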

static int
test_burst_capacity(int16_t dev_id, uint16_t vchan)
{
#define CAP_TEST_BURST_SIZE 64
        const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
        struct rte_mbuf *src, *dst;
        int i, j, iter;
        int cap, ret;
        bool dma_err;

        src = rte_pktmbuf_alloc(pool);
        dst = rte_pktmbuf_alloc(pool);

        /* to test capacity, we enqueue elements and check capacity is reduced
         * by one each time - rebaselining the expected value after each burst
         * as the capacity is only for a burst. We enqueue multiple bursts to
         * fill up half the ring, before emptying it again. We do this multiple
         * times to ensure that we get to test scenarios where we get ring
         * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
         */
        for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
                for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
                        cap = rte_dma_burst_capacity(dev_id, vchan);

                        for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
                                ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
                                                rte_pktmbuf_iova(dst), COPY_LEN, 0);
                                if (ret < 0)
                                        ERR_RETURN("Error with rte_dma_copy\n");

                                if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
                                        ERR_RETURN("Error, ring capacity did not change as expected\n");
                        }
                        if (rte_dma_submit(dev_id, vchan) < 0)
                                ERR_RETURN("Error, failed to submit burst\n");

                        if (cap < rte_dma_burst_capacity(dev_id, vchan))
                                ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
                }
                await_hw(dev_id, vchan);

                for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
                        ret = rte_dma_completed(dev_id, vchan,
                                        CAP_TEST_BURST_SIZE, NULL, &dma_err);
                        if (ret != CAP_TEST_BURST_SIZE || dma_err) {
                                enum rte_dma_status_code status;

                                rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
                                ERR_RETURN("Error with rte_dma_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n",
                                                ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
                        }
                }
                cap = rte_dma_burst_capacity(dev_id, vchan);
                if (cap != ring_space)
                        ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n",
                                        cap, ring_space);
        }

        rte_pktmbuf_free(src);
        rte_pktmbuf_free(dst);

        return 0;
}
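/*
 * The outer loop count above, 2 * ((UINT16_MAX + 1) / ring_space),
 * ensures the 16-bit job ids wrap around at least twice, so both ring
 * wrap-around and id wrap-around are exercised.
 */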

static int
test_m2d_auto_free(int16_t dev_id, uint16_t vchan)
{
#define NR_MBUF 256
        struct rte_mempool_cache *cache;
        struct rte_mbuf *src[NR_MBUF];
        uint32_t buf_cnt1, buf_cnt2;
        struct rte_mempool_ops *ops;
        uint16_t nb_done = 0;
        bool dma_err = false;
        int retry = 100;
        int i, ret = 0;
        rte_iova_t dst;

        dst = (rte_iova_t)env_test_param[TEST_PARAM_REMOTE_ADDR];

        /* Capture buffer count before allocating source buffers. */
        cache = rte_mempool_default_cache(pool, rte_lcore_id());
        ops = rte_mempool_get_ops(pool->ops_index);
        buf_cnt1 = ops->get_count(pool) + cache->len;

        if (rte_pktmbuf_alloc_bulk(pool, src, NR_MBUF) != 0)
                ERR_RETURN("alloc src mbufs failed.\n");

        if ((buf_cnt1 - NR_MBUF) != (ops->get_count(pool) + cache->len)) {
                printf("Buffer count check failed.\n");
                ret = -1;
                goto done;
        }

        for (i = 0; i < NR_MBUF; i++) {
                ret = rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(src[i]), dst,
                                COPY_LEN, RTE_DMA_OP_FLAG_AUTO_FREE);

                if (ret < 0) {
                        printf("rte_dma_copy returned error.\n");
                        goto done;
                }
        }

        rte_dma_submit(dev_id, vchan);
        do {
                nb_done += rte_dma_completed(dev_id, vchan, (NR_MBUF - nb_done), NULL, &dma_err);
                if (dma_err)
                        break;
                /* Sleep for 1 millisecond */
                rte_delay_us_sleep(1000);
        } while (retry-- && (nb_done < NR_MBUF));

        buf_cnt2 = ops->get_count(pool) + cache->len;
        if ((buf_cnt1 != buf_cnt2) || dma_err) {
                printf("Free mem to dev buffer test failed.\n");
                ret = -1;
        }

done:
        /* If the test passed, the source buffers were freed by hardware. */
        if (ret < 0)
                rte_pktmbuf_free_bulk(&src[nb_done], (NR_MBUF - nb_done));

        return ret;
}
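/*
 * RTE_DMA_OP_FLAG_AUTO_FREE asks the device to return each source mbuf
 * to its mempool once the mem-to-dev copy completes, so success is
 * verified indirectly: the pool's free-object count (backend count
 * plus per-lcore cache) must return to its pre-allocation baseline
 * without any rte_pktmbuf_free() call from software.
 */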

static int
prepare_m2d_auto_free(int16_t dev_id, uint16_t vchan)
{
        const struct rte_dma_vchan_conf qconf = {
                .direction = RTE_DMA_DIR_MEM_TO_DEV,
                .nb_desc = TEST_RINGSIZE,
                .auto_free.m2d.pool = pool,
                .dst_port.port_type = RTE_DMA_PORT_PCIE,
                .dst_port.pcie.coreid = 0,
        };

        /* Stop the device to reconfigure vchan. */
        if (rte_dma_stop(dev_id) < 0)
                ERR_RETURN("Error stopping device %u\n", dev_id);

        if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
                ERR_RETURN("Error with queue configuration\n");

        if (rte_dma_start(dev_id) != 0)
                ERR_RETURN("Error with rte_dma_start()\n");

        return 0;
}
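/*
 * Note: a vchan can only be reconfigured on a stopped device, hence
 * the stop/start pair around rte_dma_vchan_setup() above when
 * switching the test vchan from MEM_TO_MEM to MEM_TO_DEV. The copy
 * destination for this mode comes from the "remote_addr" environment
 * parameter parsed in parse_dma_env_var() below.
 */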

static int
test_dmadev_sg_copy_setup(void)
{
        int ret = TEST_SUCCESS;

        if ((info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) == 0)
                return TEST_SKIPPED;

        return ret;
}

static int
test_dmadev_burst_setup(void)
{
        if (rte_dma_burst_capacity(test_dev_id, vchan) < 64) {
                RTE_LOG(ERR, USER1,
                        "DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
                        test_dev_id);
                return TEST_SKIPPED;
        }

        return TEST_SUCCESS;
}

static int
test_dmadev_err_handling_setup(void)
{
        int ret = TEST_SKIPPED;

        /* to test error handling we can provide null pointers for source or dest in copies. This
         * requires VA mode in DPDK, since NULL (0) is a valid physical address.
         * We also need hardware that can report errors back.
         */
        if (rte_eal_iova_mode() != RTE_IOVA_VA)
                RTE_LOG(ERR, USER1,
                        "DMA Dev %u: DPDK not in VA mode, skipping error handling tests\n",
                        test_dev_id);
        else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0)
                RTE_LOG(ERR, USER1,
                        "DMA Dev %u: device does not report errors, skipping error handling tests\n",
                        test_dev_id);
        else
                ret = TEST_SUCCESS;

        return ret;
}

static int
test_dmadev_fill_setup(void)
{
        int ret = TEST_SUCCESS;

        if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0) {
                RTE_LOG(ERR, USER1,
                        "DMA Dev %u: No device fill support, skipping fill tests\n", test_dev_id);
                ret = TEST_SKIPPED;
        }

        return ret;
}

static int
test_dmadev_autofree_setup(void)
{
        int ret = TEST_SKIPPED;

        if ((info.dev_capa & RTE_DMA_CAPA_M2D_AUTO_FREE) &&
                        dma_add_test[TEST_M2D_AUTO_FREE].enabled == true) {
                if (prepare_m2d_auto_free(test_dev_id, vchan) != 0)
                        return ret;

                ret = TEST_SUCCESS;
        }

        return ret;
}

static int
test_dmadev_setup(void)
{
        int16_t dev_id = test_dev_id;
        struct rte_dma_stats stats;
        const struct rte_dma_conf conf = { .nb_vchans = 1};
        const struct rte_dma_vchan_conf qconf = {
                .direction = RTE_DMA_DIR_MEM_TO_MEM,
                .nb_desc = TEST_RINGSIZE,
        };
        int ret;

        ret = rte_dma_info_get(dev_id, &info);
        if (ret != 0)
                ERR_RETURN("Error with rte_dma_info_get()\n");

        if (info.max_vchans < 1)
                ERR_RETURN("Error, no channels available on device id %u\n", dev_id);

        if (rte_dma_configure(dev_id, &conf) != 0)
                ERR_RETURN("Error with rte_dma_configure()\n");

        if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
                ERR_RETURN("Error with queue configuration\n");

        ret = rte_dma_info_get(dev_id, &info);
        if (ret != 0 || info.nb_vchans != 1)
                ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);

        if (rte_dma_start(dev_id) != 0)
                ERR_RETURN("Error with rte_dma_start()\n");

        if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
                ERR_RETURN("Error with rte_dma_stats_get()\n");

        if (rte_dma_burst_capacity(dev_id, vchan) < 32)
                ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests\n");

        if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
                ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
                                "submitted = %"PRIu64", errors = %"PRIu64"\n",
                                stats.completed, stats.submitted, stats.errors);
        id_count = 0;

        /* create a mempool for running tests */
        pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
                        TEST_RINGSIZE * 2, /* n == num elements */
                        32,  /* cache size */
                        0,   /* priv size */
                        COPY_LEN + RTE_PKTMBUF_HEADROOM, /* data room size */
                        info.numa_node);
        if (pool == NULL)
                ERR_RETURN("Error with mempool creation\n");

        check_err_stats = false;
        vchan = 0;

        return 0;
}

static void
test_dmadev_teardown(void)
{
        rte_mempool_free(pool);
        rte_dma_stop(test_dev_id);
        rte_dma_stats_reset(test_dev_id, vchan);
        test_dev_id = -EINVAL;
}

static int
test_dmadev_instance(int16_t dev_id)
{
        struct rte_dma_info dev_info;
        enum {
                TEST_COPY = 0,
                TEST_COPY_SG,
                TEST_START,
                TEST_BURST,
                TEST_ERR,
                TEST_FILL,
                TEST_M2D,
                TEST_END
        };

        static struct runtest_param param[] = {
                {"copy", test_enqueue_copies, 640},
                {"sg_copy", test_enqueue_sg_copies, 1},
                {"stop_start", test_stop_start, 1},
                {"burst_capacity", test_burst_capacity, 1},
                {"error_handling", test_completion_handling, 1},
                {"fill", test_enqueue_fill, 1},
                {"m2d_auto_free", test_m2d_auto_free, 128},
        };

        static struct unit_test_suite ts = {
                .suite_name = "DMA dev instance testsuite",
                .setup = test_dmadev_setup,
                .teardown = test_dmadev_teardown,
                .unit_test_cases = {
                        TEST_CASE_NAMED_WITH_DATA("copy",
                                NULL, NULL,
                                runtest, &param[TEST_COPY]),
                        TEST_CASE_NAMED_WITH_DATA("sg_copy",
                                test_dmadev_sg_copy_setup, NULL,
                                runtest, &param[TEST_COPY_SG]),
                        TEST_CASE_NAMED_WITH_DATA("stop_start",
                                NULL, NULL,
                                runtest, &param[TEST_START]),
                        TEST_CASE_NAMED_WITH_DATA("burst_capacity",
                                test_dmadev_burst_setup, NULL,
                                runtest, &param[TEST_BURST]),
                        TEST_CASE_NAMED_WITH_DATA("error_handling",
                                test_dmadev_err_handling_setup, NULL,
                                runtest, &param[TEST_ERR]),
                        TEST_CASE_NAMED_WITH_DATA("fill",
                                test_dmadev_fill_setup, NULL,
                                runtest, &param[TEST_FILL]),
                        TEST_CASE_NAMED_WITH_DATA("m2d_autofree",
                                test_dmadev_autofree_setup, NULL,
                                runtest, &param[TEST_M2D]),
                        TEST_CASES_END()
                }
        };

        int ret;

        if (rte_dma_info_get(dev_id, &dev_info) < 0)
                return TEST_SKIPPED;

        test_dev_id = dev_id;
        printf("\n### Test dmadev instance %u [%s]\n",
                        test_dev_id, dev_info.dev_name);

        ret = unit_test_suite_runner(&ts);
        test_dev_id = -EINVAL;

        return ret;
}

static void
parse_dma_env_var(void)
{
        char *dma_env_param_str = getenv("DPDK_ADD_DMA_TEST_PARAM");
        char *dma_env_test_str = getenv("DPDK_ADD_DMA_TEST");
        char *params[32] = {0};
        char *tests[32] = {0};
        char *var[2] = {0};
        int n_var = 0;
        int i, j;

        /* Additional tests from the command line. */
        if (dma_env_test_str && strlen(dma_env_test_str) > 0) {
                n_var = rte_strsplit(dma_env_test_str, strlen(dma_env_test_str), tests,
                                RTE_DIM(tests), ',');
                for (i = 0; i < n_var; i++) {
                        for (j = 0; j < TEST_MAX; j++) {
                                if (!strcmp(tests[i], dma_add_test[j].name))
                                        dma_add_test[j].enabled = true;
                        }
                }
        }

        /* Command-line variables for the tests. */
        if (dma_env_param_str && strlen(dma_env_param_str) > 0) {
                n_var = rte_strsplit(dma_env_param_str, strlen(dma_env_param_str), params,
                                RTE_DIM(params), ',');
                for (i = 0; i < n_var; i++) {
                        rte_strsplit(params[i], strlen(params[i]), var, RTE_DIM(var), '=');
                        for (j = 0; j < TEST_PARAM_MAX; j++) {
                                if (!strcmp(var[0], dma_test_param[j]))
                                        env_test_param[j] = strtoul(var[1], NULL, 16);
                        }
                }
        }
}
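/*
 * Example invocation (the address is illustrative only and must be an
 * IOVA the device can actually write to):
 *
 *	DPDK_ADD_DMA_TEST=m2d_auto_free \
 *	DPDK_ADD_DMA_TEST_PARAM=remote_addr=0x4000000000 \
 *	./dpdk-test
 *
 * Both variables take comma-separated lists; parameter values are
 * parsed as hexadecimal.
 */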

static int
test_dma(void)
{
        const char *pmd = "dma_skeleton";
        int i;

        parse_dma_env_var();

        /* attempt to create skeleton instance - ignore errors due to one being already present */
        rte_vdev_init(pmd, NULL);

        if (rte_dma_count_avail() == 0)
                return TEST_SKIPPED;

        RTE_DMA_FOREACH_DEV(i) {
                if (test_dma_api(i) < 0)
                        ERR_RETURN("Error performing API tests\n");

                if (test_dmadev_instance(i) < 0)
                        ERR_RETURN("Error, test failure for device %d\n", i);
        }

        return 0;
}

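/* Run from the dpdk-test binary with the "dmadev_autotest" command. */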
REGISTER_DRIVER_TEST(dmadev_autotest, test_dma);