/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_dmadev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>
#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_dmadev_pmd.h>

#include "test.h"
#include "test_dmadev_api.h"

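/* Log an error with the calling function and line number, then fail the test. */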
#define ERR_RETURN(...) do { print_err(__func__, __LINE__, __VA_ARGS__); return -1; } while (0)

#define TEST_RINGSIZE 512
#define COPY_LEN 1024

static struct rte_mempool *pool;
static uint16_t id_count;

enum {
	TEST_PARAM_REMOTE_ADDR = 0,
	TEST_PARAM_MAX,
};

static const char * const dma_test_param[] = {
	[TEST_PARAM_REMOTE_ADDR] = "remote_addr",
};

static uint64_t env_test_param[TEST_PARAM_MAX];

enum {
	TEST_M2D_AUTO_FREE = 0,
	TEST_MAX,
};

struct dma_add_test {
	const char *name;
	bool enabled;
};

struct dma_add_test dma_add_test[] = {
	[TEST_M2D_AUTO_FREE] = {.name = "m2d_auto_free", .enabled = false},
};

static void
__rte_format_printf(3, 4)
print_err(const char *func, int lineno, const char *format, ...)
{
	va_list ap;

	fprintf(stderr, "In %s:%d - ", func, lineno);
	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
}

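/* Run a test function the given number of times, checking the device stats after
 * each iteration; unless errors are expected, any error reported in the stats
 * aborts the run.
 */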
static int
runtest(const char *printable, int (*test_fn)(int16_t dev_id, uint16_t vchan), int iterations,
		int16_t dev_id, uint16_t vchan, bool check_err_stats)
{
	struct rte_dma_stats stats;
	int i;

	rte_dma_stats_reset(dev_id, vchan);
	printf("DMA Dev %d: Running %s Tests %s\n", dev_id, printable,
			check_err_stats ? " " : "(errors expected)");
	for (i = 0; i < iterations; i++) {
		if (test_fn(dev_id, vchan) < 0)
			return -1;

		rte_dma_stats_get(dev_id, 0, &stats);
		printf("Ops submitted: %"PRIu64"\t", stats.submitted);
		printf("Ops completed: %"PRIu64"\t", stats.completed);
		printf("Errors: %"PRIu64"\r", stats.errors);

		if (stats.completed != stats.submitted)
			ERR_RETURN("\nError, not all submitted jobs are reported as completed\n");
		if (check_err_stats && stats.errors != 0)
			ERR_RETURN("\nErrors reported during op processing, aborting tests\n");
	}
	printf("\n");
	return 0;
}

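/* Give the hardware time to finish outstanding work: poll the vchan status where
 * the driver supports it, otherwise just sleep briefly.
 */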
static void
await_hw(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_vchan_status st;

	if (rte_dma_vchan_status(dev_id, vchan, &st) < 0) {
		/* for drivers that don't support this op, just sleep for 1 millisecond */
		rte_delay_us_sleep(1000);
		return;
	}

	/* for those that do, *max* end time is one second from now, but all should be faster */
	const uint64_t end_cycles = rte_get_timer_cycles() + rte_get_timer_hz();
	while (st == RTE_DMA_VCHAN_ACTIVE && rte_get_timer_cycles() < end_cycles) {
		rte_pause();
		rte_dma_vchan_status(dev_id, vchan, &st);
	}
}

/* run a series of copy tests just using some different options for enqueues and completions */
static int
do_multi_copies(int16_t dev_id, uint16_t vchan,
		int split_batches,     /* submit 2 x 16 or 1 x 32 burst */
		int split_completions, /* gather 2 x 16 or 1 x 32 completions */
		int use_completed_status) /* use completed or completed_status function */
{
	struct rte_mbuf *srcs[32], *dsts[32];
	enum rte_dma_status_code sc[32];
	unsigned int i, j;
	bool dma_err = false;

	/* Enqueue burst of copies and hit doorbell */
	for (i = 0; i < RTE_DIM(srcs); i++) {
		uint64_t *src_data;

		if (split_batches && i == RTE_DIM(srcs) / 2)
			rte_dma_submit(dev_id, vchan);

		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
		if (srcs[i] == NULL || dsts[i] == NULL)
			ERR_RETURN("Error allocating buffers\n");

		src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
		for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
			src_data[j] = rte_rand();

		if (rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(srcs[i]),
				rte_mbuf_data_iova(dsts[i]), COPY_LEN, 0) != id_count++)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
	}
	rte_dma_submit(dev_id, vchan);

	await_hw(dev_id, vchan);

	if (split_completions) {
		/* gather completions in two halves */
		uint16_t half_len = RTE_DIM(srcs) / 2;
		int ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);

		ret = rte_dma_completed(dev_id, vchan, half_len, NULL, &dma_err);
		if (ret != half_len || dma_err)
			ERR_RETURN("Error with rte_dma_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
					ret, half_len, dma_err);
	} else {
		/* gather all completions in one go, using either
		 * completed or completed_status fns
		 */
		if (!use_completed_status) {
			int n = rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
			if (n != RTE_DIM(srcs) || dma_err)
				ERR_RETURN("Error with rte_dma_completed, %u [expected: %zu], dma_err = %d\n",
						n, RTE_DIM(srcs), dma_err);
		} else {
			int n = rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc);
			if (n != RTE_DIM(srcs))
				ERR_RETURN("Error with rte_dma_completed_status, %u [expected: %zu]\n",
						n, RTE_DIM(srcs));

			for (j = 0; j < (uint16_t)n; j++)
				if (sc[j] != RTE_DMA_STATUS_SUCCESSFUL)
					ERR_RETURN("Error with rte_dma_completed_status, job %u reports failure [code %u]\n",
							j, sc[j]);
		}
	}

	/* check for empty */
	int ret = use_completed_status ?
			rte_dma_completed_status(dev_id, vchan, RTE_DIM(srcs), NULL, sc) :
			rte_dma_completed(dev_id, vchan, RTE_DIM(srcs), NULL, &dma_err);
	if (ret != 0)
		ERR_RETURN("Error with completion check - ops unexpectedly returned\n");

	for (i = 0; i < RTE_DIM(srcs); i++) {
		char *src_data, *dst_data;

		src_data = rte_pktmbuf_mtod(srcs[i], char *);
		dst_data = rte_pktmbuf_mtod(dsts[i], char *);
		for (j = 0; j < COPY_LEN; j++)
			if (src_data[j] != dst_data[j])
				ERR_RETURN("Error with copy of packet %u, byte %u\n", i, j);

		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}

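/* Do a single copy, check the data, then exercise the completion calls,
 * including their behaviour when no further jobs are outstanding.
 */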
static int
test_single_copy(int16_t dev_id, uint16_t vchan)
{
	uint16_t i;
	uint16_t id;
	enum rte_dma_status_code status;
	struct rte_mbuf *src, *dst;
	char *src_data, *dst_data;

	src = rte_pktmbuf_alloc(pool);
	dst = rte_pktmbuf_alloc(pool);
	src_data = rte_pktmbuf_mtod(src, char *);
	dst_data = rte_pktmbuf_mtod(dst, char *);

	for (i = 0; i < COPY_LEN; i++)
		src_data[i] = rte_rand() & 0xFF;

	id = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src), rte_pktmbuf_iova(dst),
			COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT);
	if (id != id_count)
		ERR_RETURN("Error with rte_dma_copy, got %u, expected %u\n",
				id, id_count);

	/* give time for copy to finish, then check it was done */
	await_hw(dev_id, vchan);

	for (i = 0; i < COPY_LEN; i++)
		if (dst_data[i] != src_data[i])
			ERR_RETURN("Data mismatch at char %u [Got %02x not %02x]\n", i,
					dst_data[i], src_data[i]);

	/* now check completion works */
	id = ~id;
	if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 1)
		ERR_RETURN("Error with rte_dma_completed\n");

	if (id != id_count)
		ERR_RETURN("Error: incorrect job id received, %u [expected %u]\n",
				id, id_count);

	/* check for completed and id when no job done */
	id = ~id;
	if (rte_dma_completed(dev_id, vchan, 1, &id, NULL) != 0)
		ERR_RETURN("Error with rte_dma_completed when no job done\n");
	if (id != id_count)
		ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
				id, id_count);

	/* check for completed_status and id when no job done */
	id = ~id;
	if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
		ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
	if (id != id_count)
		ERR_RETURN("Error: incorrect job id received when no job done, %u [expected %u]\n",
				id, id_count);

	rte_pktmbuf_free(src);
	rte_pktmbuf_free(dst);

	/* now check completion returns nothing more */
	if (rte_dma_completed(dev_id, 0, 1, NULL, NULL) != 0)
		ERR_RETURN("Error with rte_dma_completed in empty check\n");

	id_count++;

	return 0;
}

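/* Top-level copy test: a single copy, a run of individual copies, then multi-copy
 * bursts using the different enqueue/completion-gathering options.
 */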
static int
test_enqueue_copies(int16_t dev_id, uint16_t vchan)
{
	unsigned int i;

	/* test doing a single copy */
	if (test_single_copy(dev_id, vchan) < 0)
		return -1;

	/* test doing multiple single copies */
	do {
		uint16_t id;
		const uint16_t max_ops = 4;
		struct rte_mbuf *src, *dst;
		char *src_data, *dst_data;
		uint16_t count;

		src = rte_pktmbuf_alloc(pool);
		dst = rte_pktmbuf_alloc(pool);
		src_data = rte_pktmbuf_mtod(src, char *);
		dst_data = rte_pktmbuf_mtod(dst, char *);

		for (i = 0; i < COPY_LEN; i++)
			src_data[i] = rte_rand() & 0xFF;

		/* perform the same copy <max_ops> times */
		for (i = 0; i < max_ops; i++)
			if (rte_dma_copy(dev_id, vchan,
					rte_pktmbuf_iova(src),
					rte_pktmbuf_iova(dst),
					COPY_LEN, RTE_DMA_OP_FLAG_SUBMIT) != id_count++)
				ERR_RETURN("Error with rte_dma_copy\n");

		await_hw(dev_id, vchan);

		count = rte_dma_completed(dev_id, vchan, max_ops * 2, &id, NULL);
		if (count != max_ops)
			ERR_RETURN("Error with rte_dma_completed, got %u not %u\n",
					count, max_ops);

		if (id != id_count - 1)
			ERR_RETURN("Error, incorrect job id returned: got %u not %u\n",
					id, id_count - 1);

		for (i = 0; i < COPY_LEN; i++)
			if (dst_data[i] != src_data[i])
				ERR_RETURN("Data mismatch at char %u\n", i);

		rte_pktmbuf_free(src);
		rte_pktmbuf_free(dst);
	} while (0);

	/* test doing multiple copies */
	return do_multi_copies(dev_id, vchan, 0, 0, 0) /* enqueue and complete 1 batch at a time */
			/* enqueue 2 batches and then complete both */
			|| do_multi_copies(dev_id, vchan, 1, 0, 0)
			/* enqueue 1 batch, then complete in two halves */
			|| do_multi_copies(dev_id, vchan, 0, 1, 0)
			/* test using completed_status in place of regular completed API */
			|| do_multi_copies(dev_id, vchan, 0, 0, 1);
}

static int
test_stop_start(int16_t dev_id, uint16_t vchan)
{
	/* device is already started on input, should be (re)started on output */

	uint16_t id = 0;
	enum rte_dma_status_code status = RTE_DMA_STATUS_SUCCESSFUL;

	/* - test stopping a device works ok,
	 * - then do a start-stop without doing a copy
	 * - finally restart the device
	 * checking for errors at each stage, and validating we can still copy at the end.
	 */
	if (rte_dma_stop(dev_id) < 0)
		ERR_RETURN("Error stopping device\n");

	if (rte_dma_start(dev_id) < 0)
		ERR_RETURN("Error restarting device\n");
	if (rte_dma_stop(dev_id) < 0)
		ERR_RETURN("Error stopping device after restart (no jobs executed)\n");

	if (rte_dma_start(dev_id) < 0)
		ERR_RETURN("Error restarting device after multiple stop-starts\n");

	/* before doing a copy, we need to know what the next id will be; it should
	 * either be:
	 * - the id of the last job completed before the restart, if the driver does
	 *   not reset job ids on stop
	 * - or -1 (i.e. next job is 0), if the driver does reset the job ids on stop
	 */
	if (rte_dma_completed_status(dev_id, vchan, 1, &id, &status) != 0)
		ERR_RETURN("Error with rte_dma_completed_status when no job done\n");
	id += 1; /* id_count is next job id */
	if (id != id_count && id != 0)
		ERR_RETURN("Unexpected next id from device after stop-start. Got %u, expected %u or 0\n",
				id, id_count);

	id_count = id;
	if (test_single_copy(dev_id, vchan) < 0)
		ERR_RETURN("Error performing copy after device restart\n");
	return 0;
}

/* Failure handling test cases - global macros and variables for those tests */
#define COMP_BURST_SZ 16
#define OPT_FENCE(idx) ((fence && idx == 8) ? RTE_DMA_OP_FLAG_FENCE : 0)

static int
test_failure_in_full_burst(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test single full batch statuses with failures */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	struct rte_dma_stats baseline, stats;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count;
	unsigned int i;
	bool error = false;
	int err_count = 0;

	rte_dma_stats_get(dev_id, vchan, &baseline); /* get a baseline set of stats */
	for (i = 0; i < COMP_BURST_SZ; i++) {
		int id = rte_dma_copy(dev_id, vchan,
				(i == fail_idx ? 0 : rte_mbuf_data_iova(srcs[i])),
				rte_mbuf_data_iova(dsts[i]), COPY_LEN, OPT_FENCE(i));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", i);
		if (i == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.submitted != baseline.submitted + COMP_BURST_SZ)
		ERR_RETURN("Submitted stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.submitted, baseline.submitted + COMP_BURST_SZ);

	await_hw(dev_id, vchan);

	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
				count, fail_idx);
	if (!error)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* all checks ok, now verify calling completed() again always returns 0 */
	for (i = 0; i < 10; i++)
		if (rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error) != 0
				|| error == false || idx != (invalid_addr_id - 1))
			ERR_RETURN("Error with follow-up completed calls for fail idx %u\n",
					fail_idx);

	status_count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ,
			&idx, status);
	/* some HW may stop on error and be restarted after getting the error status
	 * for a single value. To handle this case, if we get just one error back,
	 * wait for more completions and get status for the rest of the burst.
	 */
	if (status_count == 1) {
		await_hw(dev_id, vchan);
		status_count += rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - 1,
				&idx, &status[1]);
	}
	/* check that at this point we have all status values */
	if (status_count != COMP_BURST_SZ - count)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	/* now verify just one failure followed by multiple successful or skipped entries */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);
	for (i = 1; i < status_count; i++)
		/* after a failure in a burst, depending on ordering/fencing,
		 * operations may be successful or skipped because of previous error.
		 */
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[i] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error with status calls for fail idx %u. Status for job %u (of %u) is not successful\n",
					fail_idx, count + i, COMP_BURST_SZ);

	/* check the completed + errors stats are as expected */
	rte_dma_stats_get(dev_id, vchan, &stats);
	if (stats.completed != baseline.completed + COMP_BURST_SZ)
		ERR_RETURN("Completed stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.completed, baseline.completed + COMP_BURST_SZ);
	for (i = 0; i < status_count; i++)
		err_count += (status[i] != RTE_DMA_STATUS_SUCCESSFUL);
	if (stats.errors != baseline.errors + err_count)
		ERR_RETURN("'Errors' stats value not as expected, %"PRIu64" not %"PRIu64"\n",
				stats.errors, baseline.errors + err_count);

	return 0;
}

static int
test_individual_status_query_with_failure(int16_t dev_id, uint16_t vchan, bool fence,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* Test gathering batch statuses one at a time */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count = 0, status_count = 0;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
				rte_mbuf_data_iova(dsts[j]), COPY_LEN, OPT_FENCE(j));
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	/* use regular "completed" until we hit error */
	while (!error) {
		uint16_t n = rte_dma_completed(dev_id, vchan, 1, &idx, &error);
		count += n;
		if (n > 1 || count >= COMP_BURST_SZ)
			ERR_RETURN("Error - too many completions received\n");
		if (n == 0 && !error)
			ERR_RETURN("Error, unexpectedly got zero completions after %u completed\n",
					count);
	}
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, last successful index not as expected, got %u, expected %u\n",
				idx, invalid_addr_id - 1);

	/* use completed_status until we hit end of burst */
	while (count + status_count < COMP_BURST_SZ) {
		uint16_t n = rte_dma_completed_status(dev_id, vchan, 1, &idx,
				&status[status_count]);
		await_hw(dev_id, vchan); /* allow delay to ensure jobs are completed */
		status_count += n;
		if (n != 1)
			ERR_RETURN("Error: unexpected number of completions received, %u, not 1\n",
					n);
	}

	/* check for single failure */
	if (status[0] == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error, unexpected successful DMA transaction\n");
	for (j = 1; j < status_count; j++)
		if (status[j] != RTE_DMA_STATUS_SUCCESSFUL
				&& status[j] != RTE_DMA_STATUS_NOT_ATTEMPTED)
			ERR_RETURN("Error, unexpected DMA error reported\n");

	return 0;
}

static int
test_single_item_status_query_with_failure(int16_t dev_id, uint16_t vchan,
		struct rte_mbuf **srcs, struct rte_mbuf **dsts, unsigned int fail_idx)
{
	/* When an error occurs, collect just a single error using "completed_status()"
	 * before going back to completed() calls
	 */
	enum rte_dma_status_code status;
	uint16_t invalid_addr_id = 0;
	uint16_t idx;
	uint16_t count, status_count, count2;
	unsigned int j;
	bool error = false;

	for (j = 0; j < COMP_BURST_SZ; j++) {
		int id = rte_dma_copy(dev_id, vchan,
				(j == fail_idx ? 0 : rte_mbuf_data_iova(srcs[j])),
				rte_mbuf_data_iova(dsts[j]), COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
		if (j == fail_idx)
			invalid_addr_id = id;
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	/* get up to the error point */
	count = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (count != fail_idx)
		ERR_RETURN("Error with rte_dma_completed for failure test. Got returned %u not %u.\n",
				count, fail_idx);
	if (!error)
		ERR_RETURN("Error, missing expected failed copy, %u. has_error is not set\n",
				fail_idx);
	if (idx != invalid_addr_id - 1)
		ERR_RETURN("Error, missing expected failed copy, %u. Got last idx %u, not %u\n",
				fail_idx, idx, invalid_addr_id - 1);

	/* get the error code */
	status_count = rte_dma_completed_status(dev_id, vchan, 1, &idx, &status);
	if (status_count != 1)
		ERR_RETURN("Error with completed_status calls for fail idx %u. Got %u not %u\n",
				fail_idx, status_count, COMP_BURST_SZ - count);
	if (status == RTE_DMA_STATUS_SUCCESSFUL)
		ERR_RETURN("Error with status returned for fail idx %u. First status was not failure\n",
				fail_idx);

	/* delay in case time is needed after the error is handled to complete other jobs */
	await_hw(dev_id, vchan);

	/* get the rest of the completions without status */
	count2 = rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
	if (error == true)
		ERR_RETURN("Error, got further errors post completed_status() call, for failure case %u.\n",
				fail_idx);
	if (count + status_count + count2 != COMP_BURST_SZ)
		ERR_RETURN("Error, incorrect number of completions received, got %u not %u\n",
				count + status_count + count2, COMP_BURST_SZ);

	return 0;
}

static int
test_multi_failure(int16_t dev_id, uint16_t vchan, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
		const unsigned int *fail, size_t num_fail)
{
	/* test having multiple errors in one go */
	enum rte_dma_status_code status[COMP_BURST_SZ];
	unsigned int i, j;
	uint16_t count, err_count = 0;
	bool error = false;

	/* enqueue and gather completions in one go */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = rte_mbuf_data_iova(srcs[j]);
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ, NULL, status);
	while (count < COMP_BURST_SZ) {
		await_hw(dev_id, vchan);

		uint16_t ret = rte_dma_completed_status(dev_id, vchan, COMP_BURST_SZ - count,
				NULL, &status[count]);
		if (ret == 0)
			ERR_RETURN("Error getting all completions for jobs. Got %u of %u\n",
					count, COMP_BURST_SZ);
		count += ret;
	}
	for (i = 0; i < count; i++)
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			err_count++;

	if (err_count != num_fail)
		ERR_RETURN("Error: Invalid number of failed completions returned, %u; expected %zu\n",
				err_count, num_fail);

	/* enqueue and gather completions in bursts, but getting errors one at a time */
	for (j = 0; j < COMP_BURST_SZ; j++) {
		uintptr_t src = rte_mbuf_data_iova(srcs[j]);
		/* set up for failure if the current index is anywhere in the fails array */
		for (i = 0; i < num_fail; i++)
			if (j == fail[i])
				src = 0;

		int id = rte_dma_copy(dev_id, vchan, src, rte_mbuf_data_iova(dsts[j]),
				COPY_LEN, 0);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_copy for buffer %u\n", j);
	}
	rte_dma_submit(dev_id, vchan);
	await_hw(dev_id, vchan);

	count = 0;
	err_count = 0;
	while (count + err_count < COMP_BURST_SZ) {
		count += rte_dma_completed(dev_id, vchan, COMP_BURST_SZ, NULL, &error);
		if (error) {
			uint16_t ret = rte_dma_completed_status(dev_id, vchan, 1,
					NULL, status);
			if (ret != 1)
				ERR_RETURN("Error getting error-status for completions\n");
			err_count += ret;
			await_hw(dev_id, vchan);
		}
	}
	if (err_count != num_fail)
		ERR_RETURN("Error: Incorrect number of failed completions received, got %u not %zu\n",
				err_count, num_fail);

	return 0;
}

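/* Run each of the failure-handling cases above for a set of failing indexes,
 * then finish with the multi-failure case.
 */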
static int
test_completion_status(int16_t dev_id, uint16_t vchan, bool fence)
{
	const unsigned int fail[] = {0, 7, 14, 15};
	struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
	unsigned int i;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		srcs[i] = rte_pktmbuf_alloc(pool);
		dsts[i] = rte_pktmbuf_alloc(pool);
	}

	for (i = 0; i < RTE_DIM(fail); i++) {
		if (test_failure_in_full_burst(dev_id, vchan, fence, srcs, dsts, fail[i]) < 0)
			return -1;

		if (test_individual_status_query_with_failure(dev_id, vchan, fence,
				srcs, dsts, fail[i]) < 0)
			return -1;

		/* this test runs the same whether fenced or unfenced, but there is
		 * no harm in running it twice
		 */
		if (test_single_item_status_query_with_failure(dev_id, vchan,
				srcs, dsts, fail[i]) < 0)
			return -1;
	}

	if (test_multi_failure(dev_id, vchan, srcs, dsts, fail, RTE_DIM(fail)) < 0)
		return -1;

	for (i = 0; i < COMP_BURST_SZ; i++) {
		rte_pktmbuf_free(srcs[i]);
		rte_pktmbuf_free(dsts[i]);
	}
	return 0;
}

static int
test_completion_handling(int16_t dev_id, uint16_t vchan)
{
	return test_completion_status(dev_id, vchan, false) /* without fences */
			|| test_completion_status(dev_id, vchan, true); /* with fences */
}

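/* Fill a buffer with a 64-bit pattern at various lengths, checking both that the
 * filled region matches the pattern and that nothing beyond it was written.
 */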
static int
test_enqueue_fill(int16_t dev_id, uint16_t vchan)
{
	const unsigned int lengths[] = {8, 64, 1024, 50, 100, 89};
	struct rte_mbuf *dst;
	char *dst_data;
	uint64_t pattern = 0xfedcba9876543210;
	unsigned int i, j;

	dst = rte_pktmbuf_alloc(pool);
	if (dst == NULL)
		ERR_RETURN("Failed to allocate mbuf\n");
	dst_data = rte_pktmbuf_mtod(dst, char *);

	for (i = 0; i < RTE_DIM(lengths); i++) {
		/* reset dst_data */
		memset(dst_data, 0, rte_pktmbuf_data_len(dst));

		/* perform the fill operation */
		int id = rte_dma_fill(dev_id, vchan, pattern,
				rte_pktmbuf_iova(dst), lengths[i], RTE_DMA_OP_FLAG_SUBMIT);
		if (id < 0)
			ERR_RETURN("Error with rte_dma_fill\n");
		await_hw(dev_id, vchan);

		if (rte_dma_completed(dev_id, vchan, 1, NULL, NULL) != 1)
			ERR_RETURN("Error: fill operation failed (length: %u)\n", lengths[i]);
		/* check the data from the fill operation is correct */
		for (j = 0; j < lengths[i]; j++) {
			char pat_byte = ((char *)&pattern)[j % 8];
			if (dst_data[j] != pat_byte)
				ERR_RETURN("Error with fill operation (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], pat_byte);
		}
		/* check that the data after the fill operation was not written to */
		for (; j < rte_pktmbuf_data_len(dst); j++)
			if (dst_data[j] != 0)
				ERR_RETURN("Error, fill operation wrote too far (lengths = %u): got (%x), not (%x)\n",
						lengths[i], dst_data[j], 0);
	}

	rte_pktmbuf_free(dst);
	return 0;
}

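/* Check that rte_dma_burst_capacity() tracks enqueues and completions correctly. */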
763 : 0 : test_burst_capacity(int16_t dev_id, uint16_t vchan)
764 : : {
765 : : #define CAP_TEST_BURST_SIZE 64
766 : 0 : const int ring_space = rte_dma_burst_capacity(dev_id, vchan);
767 : : struct rte_mbuf *src, *dst;
768 : : int i, j, iter;
769 : : int cap, ret;
770 : : bool dma_err;
771 : :
772 : 0 : src = rte_pktmbuf_alloc(pool);
773 : 0 : dst = rte_pktmbuf_alloc(pool);
774 : :
775 : : /* to test capacity, we enqueue elements and check capacity is reduced
776 : : * by one each time - rebaselining the expected value after each burst
777 : : * as the capacity is only for a burst. We enqueue multiple bursts to
778 : : * fill up half the ring, before emptying it again. We do this multiple
779 : : * times to ensure that we get to test scenarios where we get ring
780 : : * wrap-around and wrap-around of the ids returned (at UINT16_MAX).
781 : : */
782 [ # # ]: 0 : for (iter = 0; iter < 2 * (((int)UINT16_MAX + 1) / ring_space); iter++) {
783 [ # # ]: 0 : for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
784 : 0 : cap = rte_dma_burst_capacity(dev_id, vchan);
785 : :
786 [ # # ]: 0 : for (j = 0; j < CAP_TEST_BURST_SIZE; j++) {
787 : 0 : ret = rte_dma_copy(dev_id, vchan, rte_pktmbuf_iova(src),
788 : 0 : rte_pktmbuf_iova(dst), COPY_LEN, 0);
789 [ # # ]: 0 : if (ret < 0)
790 : 0 : ERR_RETURN("Error with rte_dmadev_copy\n");
791 : :
792 [ # # ]: 0 : if (rte_dma_burst_capacity(dev_id, vchan) != cap - (j + 1))
793 : 0 : ERR_RETURN("Error, ring capacity did not change as expected\n");
794 : : }
795 [ # # ]: 0 : if (rte_dma_submit(dev_id, vchan) < 0)
796 : 0 : ERR_RETURN("Error, failed to submit burst\n");
797 : :
798 [ # # ]: 0 : if (cap < rte_dma_burst_capacity(dev_id, vchan))
799 : 0 : ERR_RETURN("Error, avail ring capacity has gone up, not down\n");
800 : : }
801 : 0 : await_hw(dev_id, vchan);
802 : :
803 [ # # ]: 0 : for (i = 0; i < (ring_space / (2 * CAP_TEST_BURST_SIZE)) + 1; i++) {
804 : 0 : ret = rte_dma_completed(dev_id, vchan,
805 : : CAP_TEST_BURST_SIZE, NULL, &dma_err);
806 [ # # # # ]: 0 : if (ret != CAP_TEST_BURST_SIZE || dma_err) {
807 : : enum rte_dma_status_code status;
808 : :
809 : : rte_dma_completed_status(dev_id, vchan, 1, NULL, &status);
810 : 0 : ERR_RETURN("Error with rte_dmadev_completed, %u [expected: %u], dma_err = %d, i = %u, iter = %u, status = %u\n",
811 : : ret, CAP_TEST_BURST_SIZE, dma_err, i, iter, status);
812 : : }
813 : : }
814 : 0 : cap = rte_dma_burst_capacity(dev_id, vchan);
815 [ # # ]: 0 : if (cap != ring_space)
816 : 0 : ERR_RETURN("Error, ring capacity has not reset to original value, got %u, expected %u\n",
817 : : cap, ring_space);
818 : : }
819 : :
820 : 0 : rte_pktmbuf_free(src);
821 : 0 : rte_pktmbuf_free(dst);
822 : :
823 : 0 : return 0;
824 : : }
825 : :
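/* Check RTE_DMA_OP_FLAG_AUTO_FREE: after mem-to-dev copies complete, the source
 * mbufs should have been returned to the mempool by the hardware.
 */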
static int
test_m2d_auto_free(int16_t dev_id, uint16_t vchan)
{
#define NR_MBUF 256
	struct rte_mempool_cache *cache;
	struct rte_mbuf *src[NR_MBUF];
	uint32_t buf_cnt1, buf_cnt2;
	struct rte_mempool_ops *ops;
	uint16_t nb_done = 0;
	bool dma_err = false;
	int retry = 100;
	int i, ret = 0;
	rte_iova_t dst;

	dst = (rte_iova_t)env_test_param[TEST_PARAM_REMOTE_ADDR];

	/* Capture buffer count before allocating source buffers. */
	cache = rte_mempool_default_cache(pool, rte_lcore_id());
	ops = rte_mempool_get_ops(pool->ops_index);
	buf_cnt1 = ops->get_count(pool) + cache->len;

	if (rte_pktmbuf_alloc_bulk(pool, src, NR_MBUF) != 0)
		ERR_RETURN("alloc src mbufs failed.\n");

	if ((buf_cnt1 - NR_MBUF) != (ops->get_count(pool) + cache->len)) {
		printf("Buffer count check failed.\n");
		ret = -1;
		goto done;
	}

	for (i = 0; i < NR_MBUF; i++) {
		ret = rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(src[i]), dst,
				COPY_LEN, RTE_DMA_OP_FLAG_AUTO_FREE);

		if (ret < 0) {
			printf("rte_dma_copy returned error.\n");
			goto done;
		}
	}

	rte_dma_submit(dev_id, vchan);
	do {
		nb_done += rte_dma_completed(dev_id, vchan, (NR_MBUF - nb_done), NULL, &dma_err);
		if (dma_err)
			break;
		/* Sleep for 1 millisecond */
		rte_delay_us_sleep(1000);
	} while (retry-- && (nb_done < NR_MBUF));

	buf_cnt2 = ops->get_count(pool) + cache->len;
	if ((buf_cnt1 != buf_cnt2) || dma_err) {
		printf("Free mem to dev buffer test failed.\n");
		ret = -1;
	}

done:
	/* If the test passes, the source buffers will have been freed by hardware. */
	if (ret < 0)
		rte_pktmbuf_free_bulk(&src[nb_done], (NR_MBUF - nb_done));

	return ret;
}

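/* Reconfigure the vchan for mem-to-dev transfers with auto-free enabled; the
 * device must be stopped for the vchan to be set up again.
 */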
static int
prepare_m2d_auto_free(int16_t dev_id, uint16_t vchan)
{
	const struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = TEST_RINGSIZE,
		.auto_free.m2d.pool = pool,
		.dst_port.port_type = RTE_DMA_PORT_PCIE,
		.dst_port.pcie.coreid = 0,
	};

	/* Stop the device to reconfigure vchan. */
	if (rte_dma_stop(dev_id) < 0)
		ERR_RETURN("Error stopping device %u\n", dev_id);

	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
		ERR_RETURN("Error with queue configuration\n");

	if (rte_dma_start(dev_id) != 0)
		ERR_RETURN("Error with rte_dma_start()\n");

	return 0;
}

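/* Configure a device with a single mem-to-mem vchan, then run the full series of
 * tests against it, skipping those whose capabilities the device does not report.
 */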
static int
test_dmadev_instance(int16_t dev_id)
{
#define CHECK_ERRS true
	struct rte_dma_stats stats;
	struct rte_dma_info info;
	const struct rte_dma_conf conf = { .nb_vchans = 1 };
	const struct rte_dma_vchan_conf qconf = {
			.direction = RTE_DMA_DIR_MEM_TO_MEM,
			.nb_desc = TEST_RINGSIZE,
	};
	const int vchan = 0;
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0)
		ERR_RETURN("Error with rte_dma_info_get()\n");

	printf("\n### Test dmadev instance %u [%s]\n",
			dev_id, info.dev_name);

	if (info.max_vchans < 1)
		ERR_RETURN("Error, no channels available on device id %u\n", dev_id);

	if (rte_dma_configure(dev_id, &conf) != 0)
		ERR_RETURN("Error with rte_dma_configure()\n");

	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
		ERR_RETURN("Error with queue configuration\n");

	ret = rte_dma_info_get(dev_id, &info);
	if (ret != 0 || info.nb_vchans != 1)
		ERR_RETURN("Error, no configured queues reported on device id %u\n", dev_id);

	if (rte_dma_start(dev_id) != 0)
		ERR_RETURN("Error with rte_dma_start()\n");

	if (rte_dma_stats_get(dev_id, vchan, &stats) != 0)
		ERR_RETURN("Error with rte_dma_stats_get()\n");

	if (rte_dma_burst_capacity(dev_id, vchan) < 32)
		ERR_RETURN("Error: Device does not have sufficient burst capacity to run tests\n");

	if (stats.completed != 0 || stats.submitted != 0 || stats.errors != 0)
		ERR_RETURN("Error, device stats are not all zero: completed = %"PRIu64", "
				"submitted = %"PRIu64", errors = %"PRIu64"\n",
				stats.completed, stats.submitted, stats.errors);
	id_count = 0;

	/* create a mempool for running tests */
	pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
			TEST_RINGSIZE * 2, /* n == num elements */
			32,   /* cache size */
			0,    /* priv size */
			2048, /* data room size */
			info.numa_node);
	if (pool == NULL)
		ERR_RETURN("Error with mempool creation\n");

	/* run the test cases, use many iterations to ensure UINT16_MAX id wraparound */
	if (runtest("copy", test_enqueue_copies, 640, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* run tests stopping/starting devices and check jobs still work after restart */
	if (runtest("stop-start", test_stop_start, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* run some burst capacity tests */
	if (rte_dma_burst_capacity(dev_id, vchan) < 64)
		printf("DMA Dev %u: insufficient burst capacity (64 required), skipping tests\n",
				dev_id);
	else if (runtest("burst capacity", test_burst_capacity, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	/* to test error handling we can provide null pointers for source or dest in copies. This
	 * requires VA mode in DPDK, since NULL(0) is a valid physical address.
	 * We also need hardware that can report errors back.
	 */
	if (rte_eal_iova_mode() != RTE_IOVA_VA)
		printf("DMA Dev %u: DPDK not in VA mode, skipping error handling tests\n", dev_id);
	else if ((info.dev_capa & RTE_DMA_CAPA_HANDLES_ERRORS) == 0)
		printf("DMA Dev %u: device does not report errors, skipping error handling tests\n",
				dev_id);
	else if (runtest("error handling", test_completion_handling, 1,
			dev_id, vchan, !CHECK_ERRS) < 0)
		goto err;

	if ((info.dev_capa & RTE_DMA_CAPA_OPS_FILL) == 0)
		printf("DMA Dev %u: No device fill support, skipping fill tests\n", dev_id);
	else if (runtest("fill", test_enqueue_fill, 1, dev_id, vchan, CHECK_ERRS) < 0)
		goto err;

	if ((info.dev_capa & RTE_DMA_CAPA_M2D_AUTO_FREE) &&
			dma_add_test[TEST_M2D_AUTO_FREE].enabled == true) {
		if (prepare_m2d_auto_free(dev_id, vchan) != 0)
			goto err;
		if (runtest("m2d_auto_free", test_m2d_auto_free, 128, dev_id, vchan,
				CHECK_ERRS) < 0)
			goto err;
	}

	rte_mempool_free(pool);

	if (rte_dma_stop(dev_id) < 0)
		ERR_RETURN("Error stopping device %u\n", dev_id);

	rte_dma_stats_reset(dev_id, vchan);
	return 0;

err:
	rte_mempool_free(pool);
	rte_dma_stop(dev_id);
	return -1;
}

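/* Sanity-check the dmadev API surface using the skeleton vdev driver. */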
static int
test_apis(void)
{
	const char *pmd = "dma_skeleton";
	int id;
	int ret;

	/* attempt to create skeleton instance - ignore errors due to one being already present */
	rte_vdev_init(pmd, NULL);
	id = rte_dma_get_dev_id_by_name(pmd);
	if (id < 0)
		return TEST_SKIPPED;
	printf("\n### Test dmadev infrastructure using skeleton driver\n");
	ret = test_dma_api(id);

	return ret;
}

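/* Read optional test configuration from the environment. DPDK_ADD_DMA_TEST is a
 * comma-separated list of extra test names to enable, and DPDK_ADD_DMA_TEST_PARAM
 * a comma-separated list of name=value pairs, with values parsed as hex.
 * For example (the address value here is only illustrative):
 *   DPDK_ADD_DMA_TEST=m2d_auto_free DPDK_ADD_DMA_TEST_PARAM=remote_addr=0x200000000
 */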
static void
parse_dma_env_var(void)
{
	char *dma_env_param_str = getenv("DPDK_ADD_DMA_TEST_PARAM");
	char *dma_env_test_str = getenv("DPDK_ADD_DMA_TEST");
	char *params[32] = {0};
	char *tests[32] = {0};
	char *var[2] = {0};
	int n_var = 0;
	int i, j;

	/* Additional tests requested via environment variable. */
	if (dma_env_test_str && strlen(dma_env_test_str) > 0) {
		n_var = rte_strsplit(dma_env_test_str, strlen(dma_env_test_str), tests,
				RTE_DIM(tests), ',');
		for (i = 0; i < n_var; i++) {
			for (j = 0; j < TEST_MAX; j++) {
				if (!strcmp(tests[i], dma_add_test[j].name))
					dma_add_test[j].enabled = true;
			}
		}
	}

	/* Parameters for the tests, also from the environment. */
	if (dma_env_param_str && strlen(dma_env_param_str) > 0) {
		n_var = rte_strsplit(dma_env_param_str, strlen(dma_env_param_str), params,
				RTE_DIM(params), ',');
		for (i = 0; i < n_var; i++) {
			rte_strsplit(params[i], strlen(params[i]), var, RTE_DIM(var), '=');
			for (j = 0; j < TEST_PARAM_MAX; j++) {
				if (!strcmp(var[0], dma_test_param[j]))
					env_test_param[j] = strtoul(var[1], NULL, 16);
			}
		}
	}
}

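/* Entry point for dmadev_autotest: run the API sanity tests, then the full test
 * suite on every available DMA device.
 */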
static int
test_dma(void)
{
	int i;

	parse_dma_env_var();

	/* basic sanity on dmadev infrastructure */
	if (test_apis() < 0)
		ERR_RETURN("Error performing API tests\n");

	if (rte_dma_count_avail() == 0)
		return TEST_SKIPPED;

	RTE_DMA_FOREACH_DEV(i)
		if (test_dmadev_instance(i) < 0)
			ERR_RETURN("Error, test failure for device %d\n", i);

	return 0;
}

REGISTER_DRIVER_TEST(dmadev_autotest, test_dma);