/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2023 Intel Corporation
 */

#include "cpfl_controlq.h"
#include "base/idpf_controlq.h"
#include "rte_common.h"

/**
 * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
 * @qinfo: pointer to create control queue info struct
 *
 * Verify that the DMA parameters of each DMA memory struct are present and
 * consistent with the control queue parameters.
 */
static inline int
cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
{
	struct idpf_dma_mem *ring = &qinfo->ring_mem;
	struct idpf_dma_mem *buf = &qinfo->buf_mem;

	if (!ring->va || !ring->size)
		return -EINVAL;

	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
		return -EINVAL;

	/* no need for buffer checks for TX queues */
	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
		return 0;

	if (!buf->va || !buf->size)
		return -EINVAL;

	/* accommodate different types of rx ring buffer sizes */
	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
		return -EINVAL;

	return 0;
}

/**
 * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 * @qinfo: pointer to create queue info struct
 *
 * The CP takes care of all DMA memory allocations. Store the allocated memory
 * information for the descriptor ring and buffers. If the memory for either
 * the descriptor ring or the buffers is not allocated properly and/or is
 * inconsistent with the control queue parameters, this routine frees the
 * memory for both the descriptors and the buffers.
 */
int
cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
			 struct cpfl_ctlq_create_info *qinfo)
{
	int ret_code = 0;
	unsigned int elem_size;
	int i = 0;

	ret_code = cpfl_check_dma_mem_parameters(qinfo);
	if (ret_code)
		/* TODO: Log an error message per CP */
		goto err;

	cq->desc_ring.va = qinfo->ring_mem.va;
	cq->desc_ring.pa = qinfo->ring_mem.pa;
	cq->desc_ring.size = qinfo->ring_mem.size;

	switch (cq->cq_type) {
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
	case IDPF_CTLQ_TYPE_CONFIG_RX:
	case IDPF_CTLQ_TYPE_EVENT_RX:
	case IDPF_CTLQ_TYPE_RDMA_RX:
		/* Only receive queues will have allocated buffers
		 * during init. The CP allocates one big chunk of DMA
		 * memory whose size is equal to ring_len * buff_size.
		 * In CPFLib, the block gets broken down into multiple
		 * smaller blocks that actually get programmed into the
		 * hardware.
		 */

		cq->bi.rx_buff = (struct idpf_dma_mem **)
			idpf_calloc(hw, cq->ring_size,
				    sizeof(struct idpf_dma_mem *));
		if (!cq->bi.rx_buff) {
			ret_code = -ENOMEM;
			/* TODO: Log an error message per CP */
			goto err;
		}

		elem_size = qinfo->buf_size;
		for (i = 0; i < cq->ring_size; i++) {
			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)
				idpf_calloc(hw, 1, sizeof(struct idpf_dma_mem));
			if (!cq->bi.rx_buff[i]) {
				ret_code = -ENOMEM;
				goto free_rx_buffs;
			}
			cq->bi.rx_buff[i]->va =
				(uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
						(i * elem_size);
			cq->bi.rx_buff[i]->size = elem_size;
		}
		break;
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
	case IDPF_CTLQ_TYPE_CONFIG_TX:
	case IDPF_CTLQ_TYPE_RDMA_TX:
	case IDPF_CTLQ_TYPE_RDMA_COMPL:
		break;
	default:
		ret_code = -EINVAL;
	}

	return ret_code;

free_rx_buffs:
	/* Free the per-entry wrappers allocated so far, then the pointer
	 * array itself.
	 */
	i--;
	for (; i >= 0; i--)
		idpf_free(hw, cq->bi.rx_buff[i]);

	idpf_free(hw, cq->bi.rx_buff);

err:
	return ret_code;
}

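/* Illustrative sizing sketch (not driver logic; the 64-entry figure is only
 * an example value): for a mailbox RX queue with qinfo->len = 64 and
 * qinfo->buf_size == CPFL_CTLQ_MAILBOX_BUFFER_SIZE, the CP is expected to
 * hand over
 *	ring_mem.size = 64 * sizeof(struct idpf_ctlq_desc)
 *	buf_mem.size  = 64 * CPFL_CTLQ_MAILBOX_BUFFER_SIZE
 * and cpfl_ctlq_alloc_ring_res() carves buf_mem into 64 idpf_dma_mem
 * entries, entry i covering bytes [i * buf_size, (i + 1) * buf_size) of the
 * single CP-provided region.
 */
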
/**
 * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void
cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
	int i = 0;

	for (i = 0; i < cq->ring_size; i++) {
		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

		/* No buffer to post to descriptor, continue */
		if (!bi)
			continue;

		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
		desc->opcode = 0;
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->ret_val = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
		desc->params.indirect.param0 = 0;
		desc->params.indirect.param1 = 0;
	}
}

/**
 * cpfl_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: struct containing info for the queue to be initialized
 */
static void
cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
{
	/* set control queue registers in our local struct */
	cq->reg.head = q_create_info->reg.head;
	cq->reg.tail = q_create_info->reg.tail;
	cq->reg.len = q_create_info->reg.len;
	cq->reg.bah = q_create_info->reg.bah;
	cq->reg.bal = q_create_info->reg.bal;
	cq->reg.len_mask = q_create_info->reg.len_mask;
	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
	cq->reg.head_mask = q_create_info->reg.head_mask;
}

/**
 * cpfl_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized
 * the descriptor ring memory and buffer memory.
 */
static void
cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
{
	/* Update tail to post pre-allocated buffers for rx queues */
	if (is_rxq)
		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));

	/* For non-Mailbox control queues only TAIL needs to be set */
	if (cq->q_id != -1)
		return;

	/* Clear Head for both send or receive */
	wr32(hw, cq->reg.head, 0);

	/* set starting point */
	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}

/**
 * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
 * @hw: context info for the callback
 * @cq: pointer to the specific control queue
 *
 * DMA buffers are released by the CP itself
 */
static void
cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
{
	int i;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
		for (i = 0; i < cq->ring_size; i++)
			idpf_free(hw, cq->bi.rx_buff[i]);
		/* free the buffer header */
		idpf_free(hw, cq->bi.rx_buff);
	} else {
		idpf_free(hw, cq->bi.tx_msg);
	}
}

/**
 * cpfl_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue
 * list. The cq parameter will be allocated/initialized and passed back to
 * the caller if no errors occur.
 */
int
cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
	      struct idpf_ctlq_info **cq_out)
{
	struct idpf_ctlq_info *cq;
	bool is_rxq = false;
	int status = 0;

	if (!qinfo->len || !qinfo->buf_size ||
	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
		return -EINVAL;

	cq = (struct idpf_ctlq_info *)
	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));

	if (!cq)
		return -ENOMEM;

	cq->cq_type = qinfo->type;
	cq->q_id = qinfo->id;
	cq->buf_size = qinfo->buf_size;
	cq->ring_size = qinfo->len;

	cq->next_to_use = 0;
	cq->next_to_clean = 0;
	cq->next_to_post = cq->ring_size - 1;

	switch (qinfo->type) {
	case IDPF_CTLQ_TYPE_EVENT_RX:
	case IDPF_CTLQ_TYPE_CONFIG_RX:
	case IDPF_CTLQ_TYPE_MAILBOX_RX:
		is_rxq = true;
		/* fallthrough */
	case IDPF_CTLQ_TYPE_CONFIG_TX:
	case IDPF_CTLQ_TYPE_MAILBOX_TX:
		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
		break;

	default:
		status = -EINVAL;
		break;
	}

	if (status)
		goto init_free_q;

	if (is_rxq) {
		cpfl_ctlq_init_rxq_bufs(cq);
	} else {
		/* Allocate the array of msg pointers for TX queues */
		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
			idpf_calloc(hw, qinfo->len,
				    sizeof(struct idpf_ctlq_msg *));
		if (!cq->bi.tx_msg) {
			status = -ENOMEM;
			goto init_dealloc_q_mem;
		}
	}

	cpfl_ctlq_setup_regs(cq, qinfo);

	cpfl_ctlq_init_regs(hw, cq, is_rxq);

	idpf_init_lock(&cq->cq_lock);

	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);

	*cq_out = cq;
	return status;

init_dealloc_q_mem:
	/* free ring buffers and the ring itself */
	cpfl_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
	idpf_free(hw, cq);
	cq = NULL;

	return status;
}

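/* Minimal usage sketch for cpfl_ctlq_add() (illustrative only; the field
 * values and the "ring_dma" DMA region are assumptions, not taken from this
 * file):
 *
 *	struct cpfl_ctlq_create_info qinfo = {
 *		.type = IDPF_CTLQ_TYPE_CONFIG_TX,
 *		.id = tx_qid,                    (hypothetical queue id)
 *		.len = CPFL_CFGQ_RING_LEN,
 *		.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE,
 *	};
 *	struct idpf_ctlq_info *cq = NULL;
 *
 *	qinfo.ring_mem = ring_dma;   (CP-provided descriptor ring region;
 *	                              RX queues also need qinfo.buf_mem)
 *	if (cpfl_ctlq_add(hw, &qinfo, &cq))
 *		return -1;
 */
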
/**
 * cpfl_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 */
int
cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	struct idpf_ctlq_desc *desc;
	int num_desc_avail = 0;
	int status = 0;
	int i = 0;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	idpf_acquire_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
		status = -ENOSPC;
		goto sq_send_command_out;
	}

	for (i = 0; i < num_q_msg; i++) {
		struct idpf_ctlq_msg *msg = &q_msg[i];

		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
		desc->opcode = CPU_TO_LE16(msg->opcode);
		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
		desc->cookie_high =
			CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
		desc->cookie_low =
			CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
					  IDPF_CTLQ_FLAG_HOST_ID_S);
		if (msg->data_len) {
			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

			desc->datalen |= CPU_TO_LE16(msg->data_len);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
			/* Update the address values in the desc with the pa
			 * value for respective buffer
			 */
			desc->params.indirect.addr_high =
				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
			desc->params.indirect.addr_low =
				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
		} else {
			idpf_memcpy(&desc->params, msg->ctx.direct,
				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
		}

		/* Store buffer info */
		cq->bi.tx_msg[cq->next_to_use] = msg;
		(cq->next_to_use)++;
		if (cq->next_to_use == cq->ring_size)
			cq->next_to_use = 0;
	}

	/* Force memory write to complete before letting hardware
	 * know that there are new descriptors to fetch.
	 */
	idpf_wmb();
	wr32(hw, cq->reg.tail, cq->next_to_use);

sq_send_command_out:
	idpf_release_lock(&cq->cq_lock);

	return status;
}

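/* Illustrative send/clean flow (a sketch under assumptions, not the driver's
 * actual call sites): the caller owns the idpf_ctlq_msg and its DMA payload
 * until cpfl_ctlq_clean_sq() hands the message back once hardware has set
 * the descriptor done bit.
 *
 *	struct idpf_ctlq_msg msg = { 0 };
 *	struct idpf_ctlq_msg *done[1];
 *	uint16_t ndone = 1;
 *
 *	msg.opcode = some_opcode;            (hypothetical opcode)
 *	msg.data_len = payload.size;         (payload: caller's idpf_dma_mem)
 *	msg.ctx.indirect.payload = &payload;
 *	if (!cpfl_ctlq_send(hw, cq, 1, &msg)) {
 *		... later, once the CP has processed the descriptor:
 *		cpfl_ctlq_clean_sq(cq, &ndone, done);
 *	}
 */
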
/**
 * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
 * back for the requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 * @force: (input) clean descriptors which were not done yet. Use with caution
 * in kernel mode only
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
static int
__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
		     struct idpf_ctlq_msg *msg_status[], bool force)
{
	struct idpf_ctlq_desc *desc;
	uint16_t i = 0, num_to_clean;
	uint16_t ntc, desc_err;
	int ret = 0;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*clean_count == 0)
		return 0;
	if (*clean_count > cq->ring_size)
		return -EINVAL;

	idpf_acquire_lock(&cq->cq_lock);
	ntc = cq->next_to_clean;
	num_to_clean = *clean_count;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
			break;

		desc_err = LE16_TO_CPU(desc->ret_val);
		if (desc_err) {
			/* strip off FW internal code */
			desc_err &= 0xff;
		}

		msg_status[i] = cq->bi.tx_msg[ntc];
		if (!msg_status[i])
			break;
		msg_status[i]->status = desc_err;
		cq->bi.tx_msg[ntc] = NULL;
		/* Zero out any stale data */
		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;
	idpf_release_lock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;

	return ret;
}

/**
 * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int
cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
		   struct idpf_ctlq_msg *msg_status[])
{
	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
}

/**
 * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int
cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			uint16_t *buff_count, struct idpf_dma_mem **buffs)
{
	struct idpf_ctlq_desc *desc;
	uint16_t ntp = cq->next_to_post;
	bool buffs_avail = false;
	uint16_t tbp = ntp + 1;
	int status = 0;
	int i = 0;

	if (*buff_count > cq->ring_size)
		return -EINVAL;

	if (*buff_count > 0)
		buffs_avail = true;
	idpf_acquire_lock(&cq->cq_lock);
	if (tbp >= cq->ring_size)
		tbp = 0;

	if (tbp == cq->next_to_clean)
		/* Nothing to do */
		goto post_buffs_out;

	/* Post buffers for as many as provided or up until the last one used */
	while (ntp != cq->next_to_clean) {
		desc = IDPF_CTLQ_DESC(cq, ntp);
		if (cq->bi.rx_buff[ntp])
			goto fill_desc;
		if (!buffs_avail) {
			/* If the caller hasn't given us any buffers or
			 * there are none left, search the ring itself
			 * for an available buffer to move to this
			 * entry, starting at the next entry in the ring
			 */
			tbp = ntp + 1;
			/* Wrap ring if necessary */
			if (tbp >= cq->ring_size)
				tbp = 0;

			while (tbp != cq->next_to_clean) {
				if (cq->bi.rx_buff[tbp]) {
					cq->bi.rx_buff[ntp] =
						cq->bi.rx_buff[tbp];
					cq->bi.rx_buff[tbp] = NULL;

					/* Found a buffer, no need to
					 * search anymore
					 */
					break;
				}

				/* Wrap ring if necessary */
				tbp++;
				if (tbp >= cq->ring_size)
					tbp = 0;
			}

			if (tbp == cq->next_to_clean)
				goto post_buffs_out;
		} else {
			/* Give back pointer to DMA buffer */
			cq->bi.rx_buff[ntp] = buffs[i];
			i++;

			if (i >= *buff_count)
				buffs_avail = false;
		}

fill_desc:
		desc->flags =
			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

		/* Post buffers to descriptor */
		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
		desc->params.indirect.addr_high =
			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
		desc->params.indirect.addr_low =
			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));

		ntp++;
		if (ntp == cq->ring_size)
			ntp = 0;
	}

post_buffs_out:
	/* Only update tail if buffers were actually posted */
	if (cq->next_to_post != ntp) {
		if (ntp)
			/* Update next_to_post to ntp - 1 since current ntp
			 * will not have a buffer
			 */
			cq->next_to_post = ntp - 1;
		else
			/* Wrap to end of ring since current ntp is 0 */
			cq->next_to_post = cq->ring_size - 1;

		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	idpf_release_lock(&cq->cq_lock);
	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;

	return status;
}

/**
 * cpfl_ctlq_recv - receive control queue message callback
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int
cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
	       struct idpf_ctlq_msg *q_msg)
{
	uint16_t num_to_clean, ntc, ret_val, flags;
	struct idpf_ctlq_desc *desc;
	int ret_code = 0;
	uint16_t i = 0;

	if (!cq || !cq->ring_size)
		return -ENOBUFS;

	if (*num_q_msg == 0)
		return 0;
	else if (*num_q_msg > cq->ring_size)
		return -EINVAL;

	/* take the lock before we start messing with the ring */
	idpf_acquire_lock(&cq->cq_lock);
	ntc = cq->next_to_clean;
	num_to_clean = *num_q_msg;

	for (i = 0; i < num_to_clean; i++) {
		/* Fetch next descriptor and check if marked as done */
		desc = IDPF_CTLQ_DESC(cq, ntc);
		flags = LE16_TO_CPU(desc->flags);
		if (!(flags & IDPF_CTLQ_FLAG_DD))
			break;

		ret_val = LE16_TO_CPU(desc->ret_val);
		q_msg[i].vmvf_type = (flags &
				      (IDPF_CTLQ_FLAG_FTYPE_VM |
				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
				     IDPF_CTLQ_FLAG_FTYPE_S;

		if (flags & IDPF_CTLQ_FLAG_ERR)
			ret_code = -EBADMSG;

		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
		q_msg[i].status = ret_val;

		if (desc->datalen) {
			idpf_memcpy(q_msg[i].ctx.indirect.context,
				    &desc->params.indirect,
				    IDPF_INDIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);

			/* Assign pointer to dma buffer to ctlq_msg array
			 * to be given to upper layer
			 */
			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

			/* Zero out pointer to DMA buffer info;
			 * will be repopulated by post buffers API
			 */
			cq->bi.rx_buff[ntc] = NULL;
		} else {
			idpf_memcpy(q_msg[i].ctx.direct,
				    desc->params.raw,
				    IDPF_DIRECT_CTX_SIZE,
				    IDPF_DMA_TO_NONDMA);
		}

		/* Zero out stale data in descriptor */
		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
			    IDPF_DMA_MEM);

		ntc++;
		if (ntc == cq->ring_size)
			ntc = 0;
	}

	cq->next_to_clean = ntc;
	idpf_release_lock(&cq->cq_lock);
	*num_q_msg = i;
	if (*num_q_msg == 0)
		ret_code = -ENOMSG;

	return ret_code;
}

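/* Illustrative polling-receive flow (a sketch under assumptions): drain up to
 * a batch of messages, hand indirect payload buffers back with
 * cpfl_ctlq_post_rx_buffs(), and treat -ENOMSG as "nothing pending". The
 * batch size of 4 is arbitrary.
 *
 *	struct idpf_ctlq_msg q_msg[4];
 *	struct idpf_dma_mem *bufs[4];
 *	uint16_t nmsg = 4, nbuf = 0, i;
 *
 *	if (cpfl_ctlq_recv(cq, &nmsg, q_msg) == 0) {
 *		for (i = 0; i < nmsg; i++)
 *			if (q_msg[i].data_len)
 *				bufs[nbuf++] = q_msg[i].ctx.indirect.payload;
 *		cpfl_ctlq_post_rx_buffs(hw, cq, &nbuf, bufs);
 *	}
 */
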
int
cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
		    struct idpf_ctlq_info **cq)
{
	return cpfl_ctlq_add(hw, qinfo, cq);
}

/**
 * cpfl_ctlq_shutdown - shut down the control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific control queue
 *
 * The main shutdown routine for any control queue.
 */
static void
cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	idpf_acquire_lock(&cq->cq_lock);

	if (!cq->ring_size)
		goto shutdown_sq_out;

	/* free ring buffers and the ring itself */
	cpfl_ctlq_dealloc_ring_res(hw, cq);

	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

shutdown_sq_out:
	idpf_release_lock(&cq->cq_lock);
	idpf_destroy_lock(&cq->cq_lock);
}

/**
 * cpfl_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific control queue
 */
static void
cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	LIST_REMOVE(cq, cq_list);
	cpfl_ctlq_shutdown(hw, cq);
	idpf_free(hw, cq);
}

void
cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	cpfl_ctlq_remove(hw, cq);
}

int
cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
{
	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
}

int
cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
		     struct idpf_ctlq_msg q_msg[])
{
	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
}

int
cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
{
	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
}

int
cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
			 struct idpf_ctlq_msg *msg_status[])
{
	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
}