Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright(c) 2021 HiSilicon Limited
3 : : * Copyright(c) 2021 Intel Corporation
4 : : */
5 : :
6 : : #include <inttypes.h>
7 : :
8 : : #include <rte_dmadev.h>
9 : : #include <rte_mbuf.h>
10 : : #include <rte_pause.h>
11 : : #include <rte_cycles.h>
12 : : #include <rte_random.h>
13 : : #include <rte_bus_vdev.h>
14 : : #include <rte_dmadev_pmd.h>
15 : :
16 : : #include "test.h"
17 : : #include "test_dmadev_api.h"
18 : :
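 : : /* report an error with the calling function's name and line number, then return -1 from the caller */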
19 : : #define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)
20 : :
21 : : #define TEST_NAME_MAX_LEN 80
22 : : #define TEST_RINGSIZE 512
23 : : #define COPY_LEN 2048
24 : :
25 : : static struct rte_dma_info info;
26 : : static struct rte_mempool *pool;
27 : : static bool check_err_stats;
28 : : static int16_t test_dev_id;
29 : : static uint16_t id_count;
30 : : static uint16_t vchan;
31 : :
32 : : enum {
33 : : TEST_PARAM_REMOTE_ADDR = 0,
34 : : TEST_PARAM_MAX,
35 : : };
36 : :
37 : : static const char * const dma_test_param[] = {
38 : : [TEST_PARAM_REMOTE_ADDR] = "remote_addr",
39 : : };
40 : :
41 : : static uint64_t env_test_param[TEST_PARAM_MAX];
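 : : /* populated from the DPDK_ADD_DMA_TEST_PARAM environment variable in parse_dma_env_var() */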
42 : :
43 : : enum {
44 : : TEST_M2D_AUTO_FREE = 0,
45 : : TEST_MAX,
46 : : };
47 : :
48 : : struct dma_add_test {
49 : : const char *name;
50 : : bool enabled;
51 : : };
52 : :
53 : : struct dma_add_test dma_add_test[] = {
54 : : [TEST_M2D_AUTO_FREE] = {.name = "m2d_auto_free", .enabled = false},
55 : : };
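 : : /* additional tests are disabled by default; parse_dma_env_var() enables them via DPDK_ADD_DMA_TEST */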
56 : :
57 : : static void
58 : : __rte_format_printf(3, 4)
59 : 0 : print_err(const char *func, int lineno, const char *format, ...)
60 : : {
61 : : va_list ap;
62 : :
63 : 0 : fprintf(stderr, "In %s:%d - ", func, lineno);
64 : 0 : va_start(ap, format);
65 : 0 : vfprintf(stderr, format, ap);
66 : 0 : va_end(ap);
67 : 0 : }
68 : :
69 : : struct runtest_param {
70 : : const char name[TEST_NAME_MAX_LEN];
71 : : int (*test_fn)(int16_t dev_id, uint16_t vchan);
72 : : int iterations;
73 : : };
74 : :
75 : : static int
76 : 0 : runtest(const void *args)
77 : : {
78 : : int (*test_fn)(int16_t dev_id, uint16_t vchan);
79 : : const struct runtest_param *param = args;
80 : : struct rte_dma_stats stats;
81 : : const char *printable;
82 : : int iterations;
83 : : int16_t dev_id;
84 : : int i;
85 : :
86 : 0 : printable = param->name;
87 : 0 : iterations = param->iterations;
88 : 0 : test_fn = param->test_fn;
89 : 0 : dev_id = test_dev_id;
90 : :
91 : 0 : rte_dma_stats_reset(dev_id, vchan);
92 : 0 : printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
93 [ # # ]: 0 : check_err_stats ? " " : "(errors expected)");
94 [ # # ]: 0 : for (i = 0; i < iterations; i++) {
95 [ # # ]: 0 : if (test_fn(dev_id, vchan) < 0)
96 : : return -1;
97 : :
98 : 0 : rte_dma_stats_get(dev_id, 0, &stats);
99 : 0 : printf("Ops submitted: %"PRIu64"\t", stats.submitted);
100 : 0 : printf("Ops completed: %"PRIu64"\t", stats.completed);
101 : 0 : printf("Errors: %"PRIu64"\r", stats.errors);
102 : :
103 [ # # ]: 0 : if (stats.completed != stats.submitted)
104 : 0 : ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
105 [ # # # # ]: 0 : if (check_err_stats && stats.errors != 0)
106 : 0 : ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
107 : : }
108 : : printf("\n");
109 : 0 : return 0;
110 : : }
111 : :
112 : : static void
113 : 0 : await_hw(int16_t dev_id, uint16_t vchan)
114 : : {
115 : : enum rte_dma_vchan_status st;
116 : :
117 [ # # ]: 0 : if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
118 : : /* for drivers that don't support this op, just sleep for 1 millisecond */
119 : 0 : rte_delay_us_sleep(1000);
120 : 0 : return;
121 : : }
122 : :
123 : : /* for those that do, *max* end time is one second from now, but all should be faster */
124 : 0 : const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
125 [ # # # # ]: 0 : while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
126 : : rte_pause();
127 : 0 : rte_dma_vchan_status(dev_id, vchan, &st);
128 : : }
129 : : }
130 : :
131 : : /* run a series of copy tests just using some different options for enqueues and completions */
132 : : static int
133 : 0 : do_multi_copies(int16_t dev_id, uint16_t vchan,
134 : : int split_batches, /* submit 2 x 16 or 1 x 32 burst */
135 : : int split_completions, /* gather 2 x 16 or 1 x 32 completions */
136 : : int use_completed_status) /* use completed or completed_status function */
137 : : {
138 : : struct rte_mbuf *srcs[32], *dsts[32];
139 : : enum rte_dma_status_code sc[32];
140 : : unsigned int i, j;
141 : 0 : bool dma_err = false;
142 : :
143 : : /* Enqueue burst of copies and hit doorbell */
144 [ # # ]: 0 : for (i = 0; i < RTE_DIM(srcs); i++) {
145 : : uint64_t *src_data;
146 : :
147 [ # # ]: 0 : if (split_batches && i == RTE_DIM(srcs) / 2)
148 : 0 : rte_dma_submit(dev_id, vchan);
149 : :
150 : 0 : srcs[i] = rte_pktmbuf_alloc(pool);
151 : 0 : dsts[i] = rte_pktmbuf_alloc(pool);
152 [ # # # # ]: 0 : if (srcs[i] == NULL || dsts[i] == NULL)
153 : 0 : ERR_RETURN("Error allocating buffers\n");
154 : :
155 : 0 : src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
156 [ # # ]: 0 : for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
157 : 0 : src_data[j] = rte_rand();
158 : :
159 : 0 : if (rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(srcs[i]),
160 [ # # ]: 0 : rte_mbuf_data_iova(dsts[i]), COPY_LEN, 0) != id_count++)
161 : 0 : ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
162 : : }
163 : 0 : rte_dma_submit(dev_id, vchan);
164 : :
165 : 0 : await_hw(dev_id, vchan);
166 : :
167 [ # # ]: 0 : if (split_completions) {
168 : : /* gather completions in two halves */
169 : : uint16_t half_len = RTE_DIM(srcs) / 2;
170 : 0 : int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
171 [ # # # # ]: 0 : if (ret != half_len || dma_err)
172 : 0 : ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
173 : : ret, half_len, dma_err);
174 : :
175 : 0 : ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
176 [ # # # # ]: 0 : if (ret != half_len || dma_err)
177 : 0 : ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
178 : : ret, half_len, dma_err);
179 : : } else {
180 : : /* gather all completions in one go, using either
181 : : * completed or completed_status fns
182 : : */
183 [ # # ]: 0 : if (!use_completed_status) {
184 : 0 : int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
185 [ # # # # ]: 0 : if (n != RTE_DIM(srcs) || dma_err)
186 : 0 : ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n",
187 : : n, RTE_DIM(srcs), dma_err);
188 : : } else {
189 : 0 : int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
190 [ # # ]: 0 : if (n != RTE_DIM(srcs))
191 : 0 : ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n",
192 : : n, RTE_DIM(srcs));
193 : :
194 [ # # ]: 0 : for (j = 0; j < (uint16_t)n; j++)
195 [ # # ]: 0 : if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
196 : 0 : ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
197 : : j, sc[j]);
198 : : }
199 : : }
200 : :
201 : : /* check for empty */
202 : : int ret = use_completed_status ?
203 [ # # ]: 0 : rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
204 : : rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
205 [ # # ]: 0 : if (ret != 0)
206 : 0 : ERR_RETURN("Error with completion check - ops unexpectedly returned\n");
207 : :
208 [ # # ]: 0 : for (i = 0; i < RTE_DIM(srcs); i++) {
209 : : char *src_data, *dst_data;
210 : :
211 : 0 : src_data = rte_pktmbuf_mtod(srcs[i], char *);
212 : 0 : dst_data = rte_pktmbuf_mtod(dsts[i], char *);
213 [ # # ]: 0 : for (j = 0; j < COPY_LEN; j++)
214 [ # # ]: 0 : if (src_data[j] != dst_data[j])
215 : 0 : ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);
216 : :
217 : 0 : rte_pktmbuf_free(srcs[i]);
218 : 0 : rte_pktmbuf_free(dsts[i]);
219 : : }
220 : : return 0;
221 : : }
222 : :
223 : : static int
224 : 0 : test_single_copy(int16_t dev_id, uint16_t vchan)
225 : : {
226 : : uint16_t i;
227 : : uint16_t id;
228 : : enum rte_dma_status_code status;
229 : : struct rte_mbuf *src, *dst;
230 : : char *src_data, *dst_data;
231 : :
232 : 0 : src = rte_pktmbuf_alloc(pool);
233 : 0 : dst = rte_pktmbuf_alloc(pool);
234 : 0 : src_data = rte_pktmbuf_mtod(src, char *);
235 : 0 : dst_data = rte_pktmbuf_mtod(dst, char *);
236 : :
237 [ # # ]: 0 : for (i = 0; i < COPY_LEN; i++)
238 : 0 : src_data[i] = rte_rand() & 0xFF;
239 : :
240 : 0 : id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
241 : : COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
242 [ # # ]: 0 : if (id != id_count)
243 : 0 : ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
244 : : id, id_count);
245 : :
246 : : /* give time for copy to finish, then check it was done */
247 : 0 : await_hw(dev_id, vchan);
248 : :
249 [ # # ]: 0 : for (i = 0; i < COPY_LEN; i++)
250 [ # # ]: 0 : if (dst_data[i] != src_data[i])
251 : 0 : ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
252 : : dst_data[i], src_data[i]);
253 : :
254 : : /* now check completion works */
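 : : /* invert the id to a known-wrong value so we can verify rte_dma_completed() writes back the real one */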
255 : 0 : id = ~id;
256 [ # # ]: 0 : if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
257 : 0 : ERR_RETURN("Error with rte_dma_completed\n");
258 : :
259 [ # # ]: 0 : if (id != id_count)
260 : 0 : ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
261 : : id, id_count);
262 : :
263 : : /* check for completed and id when no job done */
264 : 0 : id = ~id;
265 [ # # ]: 0 : if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
266 : 0 : ERR_RETURN("Error with rte_dma_completed when no job done\n");
267 [ # # ]: 0 : if (id != id_count)
268 : 0 : ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
269 : : id, id_count);
270 : :
271 : : /* check for completed_status and id when no job done */
272 : 0 : id = ~id;
273 [ # # ]: 0 : if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
274 : 0 : ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
275 [ # # ]: 0 : if (id != id_count)
276 : 0 : ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
277 : : id, id_count);
278 : :
279 : 0 : rte_pktmbuf_free(src);
280 : 0 : rte_pktmbuf_free(dst);
281 : :
282 : : /* now check completion returns nothing more */
283 [ # # ]: 0 : if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
284 : 0 : ERR_RETURN("Error with rte_dma_completed in empty check\n");
285 : :
286 : 0 : id_count++;
287 : :
288 : 0 : return 0;
289 : : }
290 : :
291 : : static int
292 : 0 : test_enqueue_copies(int16_t dev_id, uint16_t vchan)
293 : : {
294 : : unsigned int i;
295 : :
296 : : /* test doing a single copy */
297 [ # # ]: 0 : if (test_single_copy(dev_id, vchan) < 0)
298 : : return -1;
299 : :
300 : : /* test doing multiple single copies */
301 : : do {
302 : : uint16_t id;
303 : : const uint16_t max_ops = 4;
304 : : struct rte_mbuf *src, *dst;
305 : : char *src_data, *dst_data;
306 : : uint16_t count;
307 : :
308 : 0 : src = rte_pktmbuf_alloc(pool);
309 : 0 : dst = rte_pktmbuf_alloc(pool);
310 : 0 : src_data = rte_pktmbuf_mtod(src, char *);
311 : 0 : dst_data = rte_pktmbuf_mtod(dst, char *);
312 : :
313 [ # # ]: 0 : for (i = 0; i < COPY_LEN; i++)
314 : 0 : src_data[i] = rte_rand() & 0xFF;
315 : :
316 : : /* perform the same copy <max_ops> times */
317 [ # # ]: 0 : for (i = 0; i < max_ops; i++)
318 : 0 : if (rte_dma_copy(dev_id, vchan,
319 : 0 : rte_pktmbuf_iova(src),
320 : 0 : rte_pktmbuf_iova(dst),
321 [ # # ]: 0 : COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
322 : 0 : ERR_RETURN("Error with rte_dma_copy\n");
323 : :
324 : 0 : await_hw(dev_id, vchan);
325 : :
326 : : count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
327 [ # # ]: 0 : if (count != max_ops)
328 : 0 : ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
329 : : count, max_ops);
330 : :
331 [ # # ]: 0 : if (id != id_count - 1)
332 : 0 : ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
333 : : id, id_count - 1);
334 : :
335 [ # # ]: 0 : for (i = 0; i < COPY_LEN; i++)
336 [ # # ]: 0 : if (dst_data[i] != src_data[i])
337 : 0 : ERR_RETURN("Data mismatch at char %u\n", i);
338 : :
339 : 0 : rte_pktmbuf_free(src);
340 : 0 : rte_pktmbuf_free(dst);
341 : : } while (0);
342 : :
343 : : /* test doing multiple copies */
344 : 0 : return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
345 : : /* enqueue 2 batches and then complete both */
346 [ # # ]: 0 : || do_multi_copies(dev_id, vchan, 1, 0, 0)
347 : : /* enqueue 1 batch, then complete in two halves */
348 [ # # ]: 0 : || do_multi_copies(dev_id, vchan, 0, 1, 0)
349 : : /* test using completed_status in place of regular completed API */
350 [ # # # # ]: 0 : || do_multi_copies(dev_id, vchan, 0, 0, 1);
351 : : }
352 : :
353 : : static int
354 : 0 : test_stop_start(int16_t dev_id, uint16_t vchan)
355 : : {
356 : : /* device is already started on input, should be (re)started on output */
357 : :
358 : 0 : uint16_t id = 0;
359 : 0 : enum rte_dma_status_code status = RTE_DMA_STATUS_SUCCESSFUL;
360 : :
361 : : /* - test stopping a device works ok,
362 : : * - then do a start-stop without doing a copy
363 : : * - finally restart the device
364 : : * checking for errors at each stage, and validating we can still copy at the end.
365 : : */
366 [ # # ]: 0 : if (rte_dma_stop(dev_id) < 0)
367 : 0 : ERR_RETURN("Error stopping device\n");
368 : :
369 [ # # ]: 0 : if (rte_dma_start(dev_id) < 0)
370 : 0 : ERR_RETURN("Error restarting device\n");
371 [ # # ]: 0 : if (rte_dma_stop(dev_id) < 0)
372 : 0 : ERR_RETURN("Error stopping device after restart (no jobs executed)\n");
373 : :
374 [ # # ]: 0 : if (rte_dma_start(dev_id) < 0)
375 : 0 : ERR_RETURN("Error restarting device after multiple stop-starts\n");
376 : :
377 : : /* before doing a copy, we need to know what the next id will be. It should
378 : : * either be:
379 : : * - the last completed job before start if driver does not reset id on stop
380 : : * - or -1 i.e. next job is 0, if driver does reset the job ids on stop
381 : : */
382 [ # # ]: 0 : if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
383 : 0 : ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
384 : 0 : id += 1; /* id_count is next job id */
385 [ # # # # ]: 0 : if (id != id_count && id != 0)
386 : 0 : ERR_RETURN("Unexpected next id from device after stop-start. Got %u, expected %u or 0\n",
387 : : id, id_count);
388 : :
389 : 0 : id_count = id;
390 [ # # ]: 0 : if (test_single_copy(dev_id, vchan) < 0)
391 : 0 : ERR_RETURN("Error performing copy after device restart\n");
392 : : return 0;
393 : : }
394 : :
395 : : static int
396 : 0 : test_enqueue_sg_copies(int16_t dev_id, uint16_t vchan)
397 : : {
398 : : unsigned int src_len, dst_len, n_sge, len, i, j, k;
399 : : char orig_src[COPY_LEN], orig_dst[COPY_LEN];
400 : 0 : struct rte_dma_info info = { 0 };
401 : : enum rte_dma_status_code status;
402 : : uint16_t id, n_src, n_dst;
403 : :
404 [ # # ]: 0 : if (rte_dma_info_get(dev_id, &info) < 0)
405 : 0 : ERR_RETURN("Failed to get dev info");
406 : :
407 [ # # ]: 0 : if (info.max_sges < 2)
408 : 0 : ERR_RETURN("Test needs minimum 2 SG pointers");
409 : :
410 : : n_sge = info.max_sges;
411 : :
412 [ # # ]: 0 : for (n_src = 1; n_src <= n_sge; n_src++) {
413 [ # # ]: 0 : for (n_dst = 1; n_dst <= n_sge; n_dst++) {
414 : : /* Normalize SG buffer lengths */
415 : : len = COPY_LEN;
416 : 0 : len -= (len % (n_src * n_dst));
417 : 0 : dst_len = len / n_dst;
418 : 0 : src_len = len / n_src;
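 : : /* worked example: with COPY_LEN 2048, n_src 3 and n_dst 2, len = 2048 - (2048 % 6) = 2046, so src_len = 682 and dst_len = 1023 */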
419 : :
420 : 0 : struct rte_dma_sge *sg_src = alloca(sizeof(struct rte_dma_sge) * n_sge);
421 : 0 : struct rte_dma_sge *sg_dst = alloca(sizeof(struct rte_dma_sge) * n_sge);
422 : 0 : struct rte_mbuf **src = alloca(sizeof(struct rte_mbuf *) * n_sge);
423 : 0 : struct rte_mbuf **dst = alloca(sizeof(struct rte_mbuf *) * n_sge);
424 : 0 : char **src_data = alloca(sizeof(char *) * n_sge);
425 : 0 : char **dst_data = alloca(sizeof(char *) * n_sge);
426 : :
427 [ # # ]: 0 : for (i = 0 ; i < len; i++)
428 : 0 : orig_src[i] = rte_rand() & 0xFF;
429 : :
430 : 0 : memset(orig_dst, 0, len);
431 : :
432 [ # # ]: 0 : for (i = 0; i < n_src; i++) {
433 : 0 : src[i] = rte_pktmbuf_alloc(pool);
434 : : RTE_ASSERT(src[i] != NULL);
435 : 0 : sg_src[i].addr = rte_pktmbuf_iova(src[i]);
436 : 0 : sg_src[i].length = src_len;
437 : 0 : src_data[i] = rte_pktmbuf_mtod(src[i], char *);
438 : : }
439 : :
440 [ # # ]: 0 : for (k = 0; k < n_dst; k++) {
441 : 0 : dst[k] = rte_pktmbuf_alloc(pool);
442 : : RTE_ASSERT(dst[k] != NULL);
443 : 0 : sg_dst[k].addr = rte_pktmbuf_iova(dst[k]);
444 : 0 : sg_dst[k].length = dst_len;
445 : 0 : dst_data[k] = rte_pktmbuf_mtod(dst[k], char *);
446 : : }
447 : :
448 [ # # ]: 0 : for (i = 0; i < n_src; i++) {
449 [ # # ]: 0 : for (j = 0; j < src_len; j++)
450 : 0 : src_data[i][j] = orig_src[i * src_len + j];
451 : : }
452 : :
453 [ # # ]: 0 : for (k = 0; k < n_dst; k++)
454 : 0 : memset(dst_data[k], 0, dst_len);
455 : :
456 : : printf("\tsrc segs: %2d [seg len: %4d] - dst segs: %2d [seg len : %4d]\n",
457 : : n_src, src_len, n_dst, dst_len);
458 : :
459 : 0 : id = rte_dma_copy_sg(dev_id, vchan, sg_src, sg_dst, n_src, n_dst,
460 : : RTE_DMA_OP_FLAG_SUBMIT);
461 : :
462 [ # # ]: 0 : if (id != id_count)
463 : 0 : ERR_RETURN("Error with rte_dma_copy_sg, got %u, expected %u\n",
464 : : id, id_count);
465 : :
466 : : /* Give time for copy to finish, then check it was done */
467 : 0 : await_hw(dev_id, vchan);
468 : :
469 [ # # ]: 0 : for (k = 0; k < n_dst; k++)
470 : 0 : memcpy((&orig_dst[0] + k * dst_len), dst_data[k], dst_len);
471 : :
472 [ # # ]: 0 : if (memcmp(orig_src, orig_dst, COPY_LEN))
473 : 0 : ERR_RETURN("Data mismatch");
474 : :
475 : : /* Verify completion */
476 : 0 : id = ~id;
477 [ # # ]: 0 : if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
478 : 0 : ERR_RETURN("Error with rte_dma_completed\n");
479 : :
480 : : /* Verify expected index (id_count) */
481 [ # # ]: 0 : if (id != id_count)
482 : 0 : ERR_RETURN("Error:incorrect job id received, %u [expected %u]\n",
483 : : id, id_count);
484 : :
485 : : /* Check for completed and id when no job done */
486 : 0 : id = ~id;
487 [ # # ]: 0 : if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
488 : 0 : ERR_RETURN("Error with rte_dma_completed when no job done\n");
489 : :
490 [ # # ]: 0 : if (id != id_count)
491 : 0 : ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
492 : : id, id_count);
493 : :
494 : : /* Check for completed_status and id when no job done */
495 : 0 : id = ~id;
496 [ # # ]: 0 : if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
497 : 0 : ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
498 [ # # ]: 0 : if (id != id_count)
499 : 0 : ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
500 : : id, id_count);
501 : :
502 [ # # ]: 0 : for (i = 0; i < n_src; i++)
503 : 0 : rte_pktmbuf_free(src[i]);
504 [ # # ]: 0 : for (i = 0; i < n_dst; i++)
505 : 0 : rte_pktmbuf_free(dst[i]);
506 : :
507 : : /* Verify that completion returns nothing more */
508 [ # # ]: 0 : if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
509 : 0 : ERR_RETURN("Error with rte_dma_completed in empty check\n");
510 : :
511 : 0 : id_count++;
512 : : }
513 : : }
514 : : return 0;
515 : : }
516 : :
517 : : /* Failure handling test cases - global macros and variables for those tests */
518 : : #define COMP_BURST_SZ 16
519 : : #define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)
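 : : /* when fence testing is enabled, put RTE_DMA_OP_FLAG_FENCE on the job at index 8, so it may only start once all earlier jobs in the burst have completed */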
520 : :
521 : : static int
522 : 0 : test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence,
523 : : struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
524 : : {
525 : : /* Test single full batch statuses with failures */
526 : : enum rte_dma_status_code status[COMP_BURST_SZ];
527 : : struct rte_dma_stats baseline, stats;
528 : : uint16_t invalid_addr_id = 0;
529 : : uint16_t idx;
530 : : uint16_t count, status_count;
531 : : unsigned int i;
532 : 0 : bool error = false;
533 : : int err_count = 0;
534 : :
535 : 0 : rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */
536 [ # # ]: 0 : for (i = 0; i < COMP_BURST_SZ; i++) {
537 : 0 : int id = rte_dma_copy(dev_id, vchan,
538 : 0 : (i == fail_idx ? 0 : rte_mbuf_data_iova(srcs[i])),
539 [ # # # # ]: 0 : rte_mbuf_data_iova(dsts[i]), COPY_LEN, OPT_FENCE(i));
540 [ # # ]: 0 : if (id < 0)
541 : 0 : ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
542 [ # # ]: 0 : if (i == fail_idx)
543 : 0 : invalid_addr_id = id;
544 : : }
545 : : rte_dma_submit(dev_id, vchan);
546 : 0 : rte_dma_stats_get(dev_id, vchan, &stats);
547 [ # # ]: 0 : if (stats.submitted != baseline.submitted + COMP_BURST_SZ)
548 : 0 : ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n",
549 : : stats.submitted, baseline.submitted + COMP_BURST_SZ);
550 : :
551 : 0 : await_hw(dev_id, vchan);
552 : :
553 : : count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
554 [ # # ]: 0 : if (count != fail_idx)
555 : 0 : ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
556 : : count, fail_idx);
557 [ # # ]: 0 : if (!error)
558 : 0 : ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
559 : : fail_idx);
560 [ # # ]: 0 : if (idx != invalid_addr_id - 1)
561 : 0 : ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
562 : : fail_idx, idx, invalid_addr_id - 1);
563 : :
564 : : /* all checks ok, now verify calling completed() again always returns 0 */
565 [ # # ]: 0 : for (i = 0; i < 10; i++)
566 [ # # ]: 0 : if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0
567 [ # # # # ]: 0 : || error == false || idx != (invalid_addr_id - 1))
568 : 0 : ERR_RETURN("Error with follow-up completed calls for fail idx %u\n",
569 : : fail_idx);
570 : :
571 : : status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ,
572 : : &idx, status);
573 : : /* some HW may stop on error and be restarted after getting error status for a single value.
574 : : * To handle this case, if we get just one error back, wait for more completions and get
575 : : * status for the rest of the burst
576 : : */
577 [ # # ]: 0 : if (status_count == 1) {
578 : 0 : await_hw(dev_id, vchan);
579 : 0 : status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1,
580 : : &idx, &status[1]);
581 : : }
582 : : /* check that at this point we have all status values */
583 [ # # ]: 0 : if (status_count != COMP_BURST_SZ - count)
584 : 0 : ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
585 : : fail_idx, status_count, COMP_BURST_SZ - count);
586 : : /* now verify just one failure followed by multiple successful or skipped entries */
587 [ # # ]: 0 : if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
588 : 0 : ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
589 : : fail_idx);
590 [ # # ]: 0 : for (i = 1; i < status_count; i++)
591 : : /* after a failure in a burst, depending on ordering/fencing,
592 : : * operations may be successful or skipped because of previous error.
593 : : */
594 : 0 : if (status[i] != RTE_DMA_STATUS_SUCCESSFUL
595 [ # # ]: 0 : && status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED)
596 : 0 : ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n",
597 : : fail_idx, count + i, COMP_BURST_SZ);
598 : :
599 : : /* check the completed + errors stats are as expected */
600 : 0 : rte_dma_stats_get(dev_id, vchan, &stats);
601 [ # # ]: 0 : if (stats.completed != baseline.completed + COMP_BURST_SZ)
602 : 0 : ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n",
603 : : stats.completed, baseline.completed + COMP_BURST_SZ);
604 [ # # ]: 0 : for (i = 0; i < status_count; i++)
605 : 0 : err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL);
606 [ # # ]: 0 : if (stats.errors != baseline.errors + err_count)
607 : 0 : ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n",
608 : : stats.errors, baseline.errors + err_count);
609 : :
610 : : return 0;
611 : : }
612 : :
613 : : static int
614 : 0 : test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence,
615 : : struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
616 : : {
617 : : /* Test gathering batch statuses one at a time */
618 : : enum rte_dma_status_code status[COMP_BURST_SZ];
619 : : uint16_t invalid_addr_id = 0;
620 : : uint16_t idx;
621 : : uint16_t count = 0, status_count = 0;
622 : : unsigned int j;
623 : 0 : bool error = false;
624 : :
625 [ # # ]: 0 : for (j = 0; j < COMP_BURST_SZ; j++) {
626 : 0 : int id = rte_dma_copy(dev_id, vchan,
627 : 0 : (j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
628 [ # # # # ]: 0 : rte_mbuf_data_iova(dsts[j]), COPY_LEN, OPT_FENCE(j));
629 [ # # ]: 0 : if (id < 0)
630 : 0 : ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
631 [ # # ]: 0 : if (j == fail_idx)
632 : 0 : invalid_addr_id = id;
633 : : }
634 : 0 : rte_dma_submit(dev_id, vchan);
635 : 0 : await_hw(dev_id, vchan);
636 : :
637 : : /* use regular "completed" until we hit error */
638 [ # # ]: 0 : while (!error) {
639 : : uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error);
640 : 0 : count += n;
641 [ # # ]: 0 : if (n > 1 || count >= COMP_BURST_SZ)
642 : 0 : ERR_RETURN("Error - too many completions received\n");
643 [ # # # # ]: 0 : if (n == 0 && !error)
644 : 0 : ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n",
645 : : count);
646 : : }
647 [ # # ]: 0 : if (idx != invalid_addr_id - 1)
648 : 0 : ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n",
649 : : idx, invalid_addr_id - 1);
650 : :
651 : : /* use completed_status until we hit end of burst */
652 [ # # ]: 0 : while (count + status_count < COMP_BURST_SZ) {
653 : 0 : uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx,
654 : : &status[status_count]);
655 : 0 : await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */
656 : 0 : status_count += n;
657 [ # # ]: 0 : if (n != 1)
658 : 0 : ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n",
659 : : n);
660 : : }
661 : :
662 : : /* check for single failure */
663 [ # # ]: 0 : if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
664 : 0 : ERR_RETURN("Error, unexpected successful DMA transaction\n");
665 [ # # ]: 0 : for (j = 1; j < status_count; j++)
666 : 0 : if (status[j] != RTE_DMA_STATUS_SUCCESSFUL
667 [ # # ]: 0 : && status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED)
668 : 0 : ERR_RETURN("Error, unexpected DMA error reported\n");
669 : :
670 : : return 0;
671 : : }
672 : :
673 : : static int
674 : 0 : test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan,
675 : : struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
676 : : {
677 : : /* When an error occurs, just collect a single error using "completed_status()"
678 : : * before going back to completed() calls
679 : : */
680 : : enum rte_dma_status_code status;
681 : : uint16_t invalid_addr_id = 0;
682 : : uint16_t idx;
683 : : uint16_t count, status_count, count2;
684 : : unsigned int j;
685 : 0 : bool error = false;
686 : :
687 [ # # ]: 0 : for (j = 0; j < COMP_BURST_SZ; j++) {
688 : 0 : int id = rte_dma_copy(dev_id, vchan,
689 : 0 : (j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
690 [ # # ]: 0 : rte_mbuf_data_iova(dsts[j]), COPY_LEN, 0);
691 [ # # ]: 0 : if (id < 0)
692 : 0 : ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
693 [ # # ]: 0 : if (j == fail_idx)
694 : 0 : invalid_addr_id = id;
695 : : }
696 : 0 : rte_dma_submit(dev_id, vchan);
697 : 0 : await_hw(dev_id, vchan);
698 : :
699 : : /* get up to the error point */
700 : : count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
701 [ # # ]: 0 : if (count != fail_idx)
702 : 0 : ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
703 : : count, fail_idx);
704 [ # # ]: 0 : if (!error)
705 : 0 : ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
706 : : fail_idx);
707 [ # # ]: 0 : if (idx != invalid_addr_id - 1)
708 : 0 : ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
709 : : fail_idx, idx, invalid_addr_id - 1);
710 : :
711 : : /* get the error code */
712 : : status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status);
713 [ # # ]: 0 : if (status_count != 1)
714 : 0 : ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
715 : : fail_idx, status_count, COMP_BURST_SZ - count);
716 [ # # ]: 0 : if (status == RTE_DMA_STATUS_SUCCESSFUL)
717 : 0 : ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
718 : : fail_idx);
719 : :
720 : : /* delay in case time is needed to complete the other jobs after the error is handled */
721 : 0 : await_hw(dev_id, vchan);
722 : :
723 : : /* get the rest of the completions without status */
724 : : count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
725 [ # # ]: 0 : if (error == true)
726 : 0 : ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n",
727 : : fail_idx);
728 [ # # ]: 0 : if (count + status_count + count2 != COMP_BURST_SZ)
729 : 0 : ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n",
730 : : count + status_count + count2, COMP_BURST_SZ);
731 : :
732 : : return 0;
733 : : }
734 : :
735 : : static int
736 : 0 : test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
737 : : const unsigned int *fail, size_t num_fail)
738 : : {
739 : : /* test having multiple errors in one go */
740 : : enum rte_dma_status_code status[COMP_BURST_SZ];
741 : : unsigned int i, j;
742 : : uint16_t count, err_count = 0;
743 : 0 : bool error = false;
744 : :
745 : : /* enqueue and gather completions in one go */
746 [ # # ]: 0 : for (j = 0; j < COMP_BURST_SZ; j++) {
747 : 0 : uintptr_t src = rte_mbuf_data_iova(srcs[j]);
748 : : /* set up for failure if the current index is anywhere is the fails array */
749 [ # # ]: 0 : for (i = 0; i < num_fail; i++)
750 [ # # ]: 0 : if (j == fail[i])
751 : : src = 0;
752 : :
753 : 0 : int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
754 : : COPY_LEN, 0);
755 [ # # ]: 0 : if (id < 0)
756 : 0 : ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
757 : : }
758 : 0 : rte_dma_submit(dev_id, vchan);
759 : 0 : await_hw(dev_id, vchan);
760 : :
761 : : count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status);
762 [ # # ]: 0 : while (count < COMP_BURST_SZ) {
763 : 0 : await_hw(dev_id, vchan);
764 : :
765 : 0 : uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count,
766 : 0 : NULL, &status[count]);
767 [ # # ]: 0 : if (ret == 0)
768 : 0 : ERR_RETURN("Error getting all completions for jobs. Got %u of %u\n",
769 : : count, COMP_BURST_SZ);
770 : 0 : count += ret;
771 : : }
772 [ # # ]: 0 : for (i = 0; i < count; i++)
773 [ # # ]: 0 : if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
774 : 0 : err_count++;
775 : :
776 [ # # ]: 0 : if (err_count != num_fail)
777 : 0 : ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n",
778 : : err_count, num_fail);
779 : :
780 : : /* enqueue and gather completions in bursts, but getting errors one at a time */
781 [ # # ]: 0 : for (j = 0; j < COMP_BURST_SZ; j++) {
782 : 0 : uintptr_t src = rte_mbuf_data_iova(srcs[j]);
783 : : /* set up for failure if the current index is anywhere is the fails array */
784 [ # # ]: 0 : for (i = 0; i < num_fail; i++)
785 [ # # ]: 0 : if (j == fail[i])
786 : : src = 0;
787 : :
788 : 0 : int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
789 : : COPY_LEN, 0);
790 [ # # ]: 0 : if (id < 0)
791 : 0 : ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
792 : : }
793 : : rte_dma_submit(dev_id, vchan);
794 : 0 : await_hw(dev_id, vchan);
795 : :
796 : : count = 0;
797 : : err_count = 0;
798 [ # # ]: 0 : while (count + err_count < COMP_BURST_SZ) {
799 : 0 : count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error);
800 [ # # ]: 0 : if (error) {
801 : : uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1,
802 : : NULL, status);
803 [ # # ]: 0 : if (ret != 1)
804 : 0 : ERR_RETURN("Error getting error-status for completions\n");
805 : 0 : err_count += ret;
806 : 0 : await_hw(dev_id, vchan);
807 : : }
808 : : }
809 [ # # ]: 0 : if (err_count != num_fail)
810 : 0 : ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n",
811 : : err_count, num_fail);
812 : :
813 : : return 0;
814 : : }
815 : :
816 : : static int
817 : 0 : test_completion_status(int16_t dev_id, uint16_t vchan, bool fence)
818 : : {
819 : 0 : const unsigned int fail[] = {0, 7, 14, 15};
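 : : /* failure positions exercise the first job, the job just before the optional fence at index 8, and the last two jobs of the burst */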
820 : : struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
821 : : unsigned int i;
822 : :
823 [ # # ]: 0 : for (i = 0; i < COMP_BURST_SZ; i++) {
824 : 0 : srcs[i] = rte_pktmbuf_alloc(pool);
825 : 0 : dsts[i] = rte_pktmbuf_alloc(pool);
826 : : }
827 : :
828 [ # # ]: 0 : for (i = 0; i < RTE_DIM(fail); i++) {
829 [ # # ]: 0 : if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0)
830 : : return -1;
831 : :
832 [ # # ]: 0 : if (test_individual_status_query_with_failure(dev_id, vchan, fence,
833 : : srcs, dsts, fail[i]) < 0)
834 : : return -1;
835 : :
836 : : /* the test runs the same whether fenced or unfenced, but there is no harm in running it twice */
837 [ # # ]: 0 : if (test_single_item_status_query_with_failure(dev_id, vchan,
838 : : srcs, dsts, fail[i]) < 0)
839 : : return -1;
840 : : }
841 : :
842 [ # # ]: 0 : if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0)
843 : : return -1;
844 : :
845 [ # # ]: 0 : for (i = 0; i < COMP_BURST_SZ; i++) {
846 : 0 : rte_pktmbuf_free(srcs[i]);
847 : 0 : rte_pktmbuf_free(dsts[i]);
848 : : }
849 : : return 0;
850 : : }
851 : :
852 : : static int
853 : 0 : test_completion_handling(int16_t dev_id, uint16_t vchan)
854 : : {
855 : 0 : return test_completion_status(dev_id, vchan, false) /* without fences */
856 [ # # # # ]: 0 : || test_completion_status(dev_id, vchan, true); /* with fences */
857 : : }
858 : :
859 : : static int
860 : 0 : test_enqueue_fill(int16_t dev_id, uint16_t vchan)
861 : : {
862 : 0 : const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
863 : : struct rte_mbuf *dst;
864 : : char *dst_data;
865 : 0 : uint64_t pattern = 0xfedcba9876543210;
866 : : unsigned int i, j;
867 : :
868 : 0 : dst = rte_pktmbuf_alloc(pool);
869 [ # # ]: 0 : if (dst == NULL)
870 : 0 : ERR_RETURN("Failed to allocate mbuf\n");
871 : 0 : dst_data = rte_pktmbuf_mtod(dst, char *);
872 : :
873 [ # # ]: 0 : for (i = 0; i < RTE_DIM(lengths); i++) {
874 : : /* reset dst_data */
875 : 0 : memset(dst_data, 0, rte_pktmbuf_data_len(dst));
876 : :
877 : : /* perform the fill operation */
878 : 0 : int id = rte_dma_fill(dev_id, vchan, pattern,
879 : 0 : rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
880 [ # # ]: 0 : if (id < 0)
881 : 0 : ERR_RETURN("Error with rte_dma_fill\n");
882 : 0 : await_hw(dev_id, vchan);
883 : :
884 [ # # ]: 0 : if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
885 : 0 : ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
886 : : /* check the data from the fill operation is correct */
887 [ # # ]: 0 : for (j = 0; j < lengths[i]; j++) {
888 : 0 : char pat_byte = ((char *)&pattern)[j % 8];
889 [ # # ]: 0 : if (dst_data[j] != pat_byte)
890 : 0 : ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
891 : : lengths[i], dst_data[j], pat_byte);
892 : : }
893 : : /* check that the data after the fill operation was not written to */
894 [ # # ]: 0 : for (; j < rte_pktmbuf_data_len(dst); j++)
895 [ # # ]: 0 : if (dst_data[j] != 0)
896 : 0 : ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n",
897 : : lengths[i], dst_data[j], 0);
898 : : }
899 : :
900 : 0 : rte_pktmbuf_free(dst);
901 : 0 : return 0;
902 : : }
903 : :
904 : : static int
905 : 0 : test_burst_capacity(int16_t dev_id, uint16_t vchan)
906 : : {
907 : : #define CAP_TEST_BURST_SIZE 64
908 : 0 : const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
909 : : struct rte_mbuf *src, *dst;
910 : : int i, j, iter;
911 : : int cap, ret;
912 : : bool dma_err;
913 : :
914 : 0 : src = rte_pktmbuf_alloc(pool);
915 : 0 : dst = rte_pktmbuf_alloc(pool);
916 : :
917 : : /* to test capacity, we enqueue elements and check capacity is reduced
918 : : * by one each time - rebaselining the expected value after each burst
919 : : * as the capacity is only for a burst. We enqueue multiple bursts to
920 : : * fill up half the ring, before emptying it again. We do this multiple
921 : : * times to ensure that we get to test scenarios where we get ring
922 : : * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
923 : : */
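 : : /* e.g. if the driver reports a burst capacity of 512, each outer iteration enqueues 5 bursts of 64 (320 jobs) and then drains them all */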
924 [ # # ]: 0 : for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
925 [ # # ]: 0 : for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
926 : 0 : cap = rte_dma_burst_capacity(dev_id, vchan);
927 : :
928 [ # # ]: 0 : for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
929 : 0 : ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
930 : 0 : rte_pktmbuf_iova(dst), COPY_LEN, 0);
931 [ # # ]: 0 : if (ret < 0)
932 : 0 : ERR_RETURN("Error with rte_dmadev_copy\n");
933 : :
934 [ # # ]: 0 : if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
935 : 0 : ERR_RETURN("Error, ring capacity did not change as expected\n");
936 : : }
937 [ # # ]: 0 : if (rte_dma_submit(dev_id, vchan) < 0)
938 : 0 : ERR_RETURN("Error, failed to submit burst\n");
939 : :
940 [ # # ]: 0 : if (cap < rte_dma_burst_capacity(dev_id, vchan))
941 : 0 : ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
942 : : }
943 : 0 : await_hw(dev_id, vchan);
944 : :
945 [ # # ]: 0 : for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
946 : 0 : ret = rte_dma_completed(dev_id, vchan,
947 : : CAP_TEST_BURST_SIZE, NULL, &dma_err);
948 [ # # # # ]: 0 : if (ret != CAP_TEST_BURST_SIZE || dma_err) {
949 : : enum rte_dma_status_code status;
950 : :
951 : : rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
952 : 0 : ERR_RETURN("Error with rte_dmadev_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n",
953 : : ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
954 : : }
955 : : }
956 : 0 : cap = rte_dma_burst_capacity(dev_id, vchan);
957 [ # # ]: 0 : if (cap != ring_space)
958 : 0 : ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n",
959 : : cap, ring_space);
960 : : }
961 : :
962 : 0 : rte_pktmbuf_free(src);
963 : 0 : rte_pktmbuf_free(dst);
964 : :
965 : 0 : return 0;
966 : : }
967 : :
968 : : static int
969 : 0 : test_m2d_auto_free(int16_t dev_id, uint16_t vchan)
970 : : {
971 : : #define NR_MBUF 256
972 : : struct rte_mempool_cache *cache;
973 : : struct rte_mbuf *src[NR_MBUF];
974 : : uint32_t buf_cnt1, buf_cnt2;
975 : : struct rte_mempool_ops *ops;
976 : : uint16_t nb_done = 0;
977 : 0 : bool dma_err = false;
978 : : int retry = 100;
979 : : int i, ret = 0;
980 : : rte_iova_t dst;
981 : :
982 [ # # ]: 0 : dst = (rte_iova_t)env_test_param[TEST_PARAM_REMOTE_ADDR];
983 : :
984 : : /* Capture buffer count before allocating source buffer. */
985 [ # # ]: 0 : cache = rte_mempool_default_cache(pool, rte_lcore_id());
986 [ # # ]: 0 : ops = rte_mempool_get_ops(pool->ops_index);
987 : 0 : buf_cnt1 = ops->get_count(pool) + cache->len;
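 : : /* free-buffer count is the backing-store total plus this lcore's cache; it is re-read after completion to confirm the auto-freed mbufs returned to the pool */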
988 : :
989 [ # # ]: 0 : if (rte_pktmbuf_alloc_bulk(pool, src, NR_MBUF) != 0)
990 : 0 : ERR_RETURN("alloc src mbufs failed.\n");
991 : :
992 [ # # ]: 0 : if ((buf_cnt1 - NR_MBUF) != (ops->get_count(pool) + cache->len)) {
993 : : printf("Buffer count check failed.\n");
994 : : ret = -1;
995 : 0 : goto done;
996 : : }
997 : :
998 [ # # ]: 0 : for (i = 0; i < NR_MBUF; i++) {
999 : 0 : ret = rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(src[i]), dst,
1000 : : COPY_LEN, RTE_DMA_OP_FLAG_AUTO_FREE);
1001 : :
1002 [ # # ]: 0 : if (ret < 0) {
1003 : : printf("rte_dma_copy returned error.\n");
1004 : 0 : goto done;
1005 : : }
1006 : : }
1007 : :
1008 : 0 : rte_dma_submit(dev_id, vchan);
1009 : : do {
1010 : 0 : nb_done += rte_dma_completed(dev_id, vchan, (NR_MBUF - nb_done), NULL, &dma_err);
1011 [ # # ]: 0 : if (dma_err)
1012 : : break;
1013 : : /* Sleep for 1 millisecond */
1014 : 0 : rte_delay_us_sleep(1000);
1015 [ # # # # ]: 0 : } while (retry-- && (nb_done < NR_MBUF));
1016 : :
1017 : 0 : buf_cnt2 = ops->get_count(pool) + cache->len;
1018 [ # # # # ]: 0 : if ((buf_cnt1 != buf_cnt2) || dma_err) {
1019 : : printf("Free mem to dev buffer test failed.\n");
1020 : : ret = -1;
1021 : : }
1022 : :
1023 : 0 : done:
1024 : : /* If the test passes source buffer will be freed in hardware. */
1025 [ # # ]: 0 : if (ret < 0)
1026 : 0 : rte_pktmbuf_free_bulk(&src[nb_done], (NR_MBUF - nb_done));
1027 : :
1028 : : return ret;
1029 : : }
1030 : :
1031 : : static int
1032 : 0 : prepare_m2d_auto_free(int16_t dev_id, uint16_t vchan)
1033 : : {
1034 : 0 : const struct rte_dma_vchan_conf qconf = {
1035 : : .direction = RTE_DMA_DIR_MEM_TO_DEV,
1036 : : .nb_desc = TEST_RINGSIZE,
1037 : : .auto_free.m2d.pool = pool,
1038 : : .dst_port.port_type = RTE_DMA_PORT_PCIE,
1039 : : .dst_port.pcie.coreid = 0,
1040 : : };
1041 : :
1042 : : /* Stop the device to reconfigure vchan. */
1043 [ # # ]: 0 : if (rte_dma_stop(dev_id) < 0)
1044 : 0 : ERR_RETURN("Error stopping device %u\n", dev_id);
1045 : :
1046 [ # # ]: 0 : if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
1047 : 0 : ERR_RETURN("Error with queue configuration\n");
1048 : :
1049 [ # # ]: 0 : if (rte_dma_start(dev_id) != 0)
1050 : 0 : ERR_RETURN("Error with rte_dma_start()\n");
1051 : :
1052 : : return 0;
1053 : : }
1054 : :
1055 : : static int
1056 : 0 : test_dmadev_sg_copy_setup(void)
1057 : : {
1058 : : int ret = TEST_SUCCESS;
1059 : :
1060 [ # # ]: 0 : if ((info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) == 0)
1061 : 0 : return TEST_SKIPPED;
1062 : :
1063 : : return ret;
1064 : : }
1065 : :
1066 : : static int
1067 : 0 : test_dmadev_burst_setup(void)
1068 : : {
1069 [ # # ]: 0 : if (rte_dma_burst_capacity(test_dev_id, vchan) < 64) {
1070 : 0 : RTE_LOG(ERR, USER1,
1071 : : "DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
1072 : : test_dev_id);
1073 : 0 : return TEST_SKIPPED;
1074 : : }
1075 : :
1076 : : return TEST_SUCCESS;
1077 : : }
1078 : :
1079 : : static int
1080 : 0 : test_dmadev_err_handling_setup(void)
1081 : : {
1082 : : int ret = TEST_SKIPPED;
1083 : :
1084 : : /* to test error handling we can provide null pointers for source or dest in copies. This
1085 : : * requires VA mode in DPDK, since NULL(0) is a valid physical address.
1086 : : * We also need hardware that can report errors back.
1087 : : */
1088 [ # # ]: 0 : if (rte_eal_iova_mode() != RTE_IOVA_VA)
1089 : 0 : RTE_LOG(ERR, USER1,
1090 : : "DMA Dev %u: DPDK not in VA mode, skipping error handling tests\n",
1091 : : test_dev_id);
1092 [ # # ]: 0 : else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0)
1093 : 0 : RTE_LOG(ERR, USER1,
1094 : : "DMA Dev %u: device does not report errors, skipping error handling tests\n",
1095 : : test_dev_id);
1096 : : else
1097 : : ret = TEST_SUCCESS;
1098 : :
1099 : 0 : return ret;
1100 : : }
1101 : :
1102 : : static int
1103 : 0 : test_dmadev_fill_setup(void)
1104 : : {
1105 : : int ret = TEST_SUCCESS;
1106 : :
1107 [ # # ]: 0 : if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0) {
1108 : 0 : RTE_LOG(ERR, USER1,
1109 : : "DMA Dev %u: No device fill support, skipping fill tests\n", test_dev_id);
1110 : : ret = TEST_SKIPPED;
1111 : : }
1112 : :
1113 : 0 : return ret;
1114 : : }
1115 : :
1116 : : static int
1117 : 0 : test_dmadev_autofree_setup(void)
1118 : : {
1119 : : int ret = TEST_SKIPPED;
1120 : :
1121 [ # # ]: 0 : if ((info.dev_capa & RTE_DMA_CAPA_M2D_AUTO_FREE) &&
1122 [ # # ]: 0 : dma_add_test[TEST_M2D_AUTO_FREE].enabled == true) {
1123 [ # # ]: 0 : if (prepare_m2d_auto_free(test_dev_id, vchan) != 0)
1124 : 0 : return ret;
1125 : :
1126 : : ret = TEST_SUCCESS;
1127 : : }
1128 : :
1129 : : return ret;
1130 : : }
1131 : :
1132 : : static int
1133 : 0 : test_dmadev_setup(void)
1134 : : {
1135 : 0 : int16_t dev_id = test_dev_id;
1136 : : struct rte_dma_stats stats;
1137 : 0 : const struct rte_dma_conf conf = { .nb_vchans = 1};
1138 : 0 : const struct rte_dma_vchan_conf qconf = {
1139 : : .direction = RTE_DMA_DIR_MEM_TO_MEM,
1140 : : .nb_desc = TEST_RINGSIZE,
1141 : : };
1142 : : int ret;
1143 : :
1144 : 0 : ret = rte_dma_info_get(dev_id, &info);
1145 [ # # ]: 0 : if (ret != 0)
1146 : 0 : ERR_RETURN("Error with rte_dma_info_get()\n");
1147 : :
1148 [ # # ]: 0 : if (info.max_vchans < 1)
1149 : 0 : ERR_RETURN("Error, no channels available on device id %u\n", dev_id);
1150 : :
1151 [ # # ]: 0 : if (rte_dma_configure(dev_id, &conf) != 0)
1152 : 0 : ERR_RETURN("Error with rte_dma_configure()\n");
1153 : :
1154 [ # # ]: 0 : if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
1155 : 0 : ERR_RETURN("Error with queue configuration\n");
1156 : :
1157 : 0 : ret = rte_dma_info_get(dev_id, &info);
1158 [ # # # # ]: 0 : if (ret != 0 || info.nb_vchans != 1)
1159 : 0 : ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);
1160 : :
1161 [ # # ]: 0 : if (rte_dma_start(dev_id) != 0)
1162 : 0 : ERR_RETURN("Error with rte_dma_start()\n");
1163 : :
1164 [ # # ]: 0 : if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
1165 : 0 : ERR_RETURN("Error with rte_dma_stats_get()\n");
1166 : :
1167 [ # # ]: 0 : if (rte_dma_burst_capacity(dev_id, vchan) < 32)
1168 : 0 : ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests");
1169 : :
1170 [ # # # # # # ]: 0 : if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
1171 : 0 : ERR_RETURN("Error device stats are not all zero: completed = %"PRIu64", "
1172 : : "submitted = %"PRIu64", errors = %"PRIu64"\n",
1173 : : stats.completed, stats.submitted, stats.errors);
1174 : 0 : id_count = 0;
1175 : :
1176 : : /* create a mempool for running tests */
1177 : 0 : pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
1178 : : TEST_RINGSIZE * 2, /* n == num elements */
1179 : : 32, /* cache size */
1180 : : 0, /* priv size */
1181 : : COPY_LEN + RTE_PKTMBUF_HEADROOM, /* data room size */
1182 : 0 : info.numa_node);
1183 [ # # ]: 0 : if (pool == NULL)
1184 : 0 : ERR_RETURN("Error with mempool creation\n");
1185 : :
1186 : 0 : check_err_stats = false;
1187 : 0 : vchan = 0;
1188 : :
1189 : 0 : return 0;
1190 : : }
1191 : :
1192 : : static void
1193 : 0 : test_dmadev_teardown(void)
1194 : : {
1195 : 0 : rte_mempool_free(pool);
1196 : 0 : rte_dma_stop(test_dev_id);
1197 : 0 : rte_dma_stats_reset(test_dev_id, vchan);
1198 : 0 : test_dev_id = -EINVAL;
1199 : 0 : }
1200 : :
1201 : : static int
1202 : 0 : test_dmadev_instance(int16_t dev_id)
1203 : : {
1204 : : struct rte_dma_info dev_info;
1205 : : enum {
1206 : : TEST_COPY = 0,
1207 : : TEST_COPY_SG,
1208 : : TEST_START,
1209 : : TEST_BURST,
1210 : : TEST_ERR,
1211 : : TEST_FILL,
1212 : : TEST_M2D,
1213 : : TEST_END
1214 : : };
1215 : :
1216 : : static struct runtest_param param[] = {
1217 : : {"copy", test_enqueue_copies, 640},
1218 : : {"sg_copy", test_enqueue_sg_copies, 1},
1219 : : {"stop_start", test_stop_start, 1},
1220 : : {"burst_capacity", test_burst_capacity, 1},
1221 : : {"error_handling", test_completion_handling, 1},
1222 : : {"fill", test_enqueue_fill, 1},
1223 : : {"m2d_auto_free", test_m2d_auto_free, 128},
1224 : : };
1225 : :
1226 : : static struct unit_test_suite ts = {
1227 : : .suite_name = "DMA dev instance testsuite",
1228 : : .setup = test_dmadev_setup,
1229 : : .teardown = test_dmadev_teardown,
1230 : : .unit_test_cases = {
1231 : : TEST_CASE_NAMED_WITH_DATA("copy",
1232 : : NULL, NULL,
1233 : : runtest, ¶m[TEST_COPY]),
1234 : : TEST_CASE_NAMED_WITH_DATA("sg_copy",
1235 : : test_dmadev_sg_copy_setup, NULL,
1236 : : runtest, ¶m[TEST_COPY_SG]),
1237 : : TEST_CASE_NAMED_WITH_DATA("stop_start",
1238 : : NULL, NULL,
1239 : : runtest, ¶m[TEST_START]),
1240 : : TEST_CASE_NAMED_WITH_DATA("burst_capacity",
1241 : : test_dmadev_burst_setup, NULL,
1242 : : runtest, ¶m[TEST_BURST]),
1243 : : TEST_CASE_NAMED_WITH_DATA("error_handling",
1244 : : test_dmadev_err_handling_setup, NULL,
1245 : : runtest, ¶m[TEST_ERR]),
1246 : : TEST_CASE_NAMED_WITH_DATA("fill",
1247 : : test_dmadev_fill_setup, NULL,
1248 : : runtest, ¶m[TEST_FILL]),
1249 : : TEST_CASE_NAMED_WITH_DATA("m2d_autofree",
1250 : : test_dmadev_autofree_setup, NULL,
1251 : : runtest, ¶m[TEST_M2D]),
1252 : : TEST_CASES_END()
1253 : : }
1254 : : };
1255 : :
1256 : : int ret;
1257 : :
1258 [ # # ]: 0 : if (rte_dma_info_get(dev_id, &dev_info) < 0)
1259 : : return TEST_SKIPPED;
1260 : :
1261 : 0 : test_dev_id = dev_id;
1262 : 0 : printf("\n### Test dmadev instance %u [%s]\n",
1263 : : test_dev_id, dev_info.dev_name);
1264 : :
1265 : 0 : ret = unit_test_suite_runner(&ts);
1266 : 0 : test_dev_id = -EINVAL;
1267 : :
1268 : 0 : return ret;
1269 : : }
1270 : :
1271 : : static void
1272 : 0 : parse_dma_env_var(void)
1273 : : {
1274 : 0 : char *dma_env_param_str = getenv("DPDK_ADD_DMA_TEST_PARAM");
1275 : 0 : char *dma_env_test_str = getenv("DPDK_ADD_DMA_TEST");
1276 : 0 : char *params[32] = {0};
1277 : 0 : char *tests[32] = {0};
1278 : 0 : char *var[2] = {0};
1279 : : int n_var = 0;
1280 : : int i, j;
1281 : :
1282 : : /* Additional tests enabled from the command line. */
1283 [ # # # # ]: 0 : if (dma_env_test_str && strlen(dma_env_test_str) > 0) {
1284 : 0 : n_var = rte_strsplit(dma_env_test_str, strlen(dma_env_test_str), tests,
1285 : : RTE_DIM(tests), ',');
1286 [ # # ]: 0 : for (i = 0; i < n_var; i++) {
1287 [ # # ]: 0 : for (j = 0; j < TEST_MAX; j++) {
1288 [ # # ]: 0 : if (!strcmp(tests[i], dma_add_test[j].name))
1289 : 0 : dma_add_test[j].enabled = true;
1290 : : }
1291 : : }
1292 : : }
1293 : :
1294 : : /* Command-line variables for the tests */
1295 [ # # # # ]: 0 : if (dma_env_param_str && strlen(dma_env_param_str) > 0) {
1296 : 0 : n_var = rte_strsplit(dma_env_param_str, strlen(dma_env_param_str), params,
1297 : : RTE_DIM(params), ',');
1298 [ # # ]: 0 : for (i = 0; i < n_var; i++) {
1299 : 0 : rte_strsplit(params[i], strlen(params[i]), var, RTE_DIM(var), '=');
1300 [ # # ]: 0 : for (j = 0; j < TEST_PARAM_MAX; j++) {
1301 [ # # ]: 0 : if (!strcmp(var[0], dma_test_param[j]))
1302 : 0 : env_test_param[j] = strtoul(var[1], NULL, 16);
1303 : : }
1304 : : }
1305 : : }
1306 : 0 : }
1307 : :
1308 : : static int
1309 : 0 : test_dma(void)
1310 : : {
1311 : : const char *pmd = "dma_skeleton";
1312 : : int i;
1313 : :
1314 : 0 : parse_dma_env_var();
1315 : :
1316 : : /* attempt to create skeleton instance - ignore errors if one is already present */
1317 : 0 : rte_vdev_init(pmd, NULL);
1318 : :
1319 [ # # ]: 0 : if (rte_dma_count_avail() == 0)
1320 : : return TEST_SKIPPED;
1321 : :
1322 [ # # ]: 0 : RTE_DMA_FOREACH_DEV(i) {
1323 [ # # ]: 0 : if (test_dma_api(i) < 0)
1324 : 0 : ERR_RETURN("Error performing API tests\n");
1325 : :
1326 [ # # ]: 0 : if (test_dmadev_instance(i) < 0)
1327 : 0 : ERR_RETURN("Error, test failure for device %d\n", i);
1328 : : }
1329 : :
1330 : : return 0;
1331 : : }
1332 : :
1333 : 252 : REGISTER_DRIVER_TEST(dmadev_autotest, test_dma);