Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright(c) 2001-2023 Intel Corporation
3 : : */
4 : :
5 : : #include "idpf_controlq.h"
6 : :
7 : : /**
8 : : * idpf_ctlq_setup_regs - initialize control queue registers
9 : : * @cq: pointer to the specific control queue
10 : : * @q_create_info: struct containing info for the queue to be initialized
11 : : */
12 : : static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
13 : : struct idpf_ctlq_create_info *q_create_info)
14 : : {
15 : : /* set control queue registers in our local struct */
16 : 0 : cq->reg.head = q_create_info->reg.head;
17 : 0 : cq->reg.tail = q_create_info->reg.tail;
18 : 0 : cq->reg.len = q_create_info->reg.len;
19 : 0 : cq->reg.bah = q_create_info->reg.bah;
20 : 0 : cq->reg.bal = q_create_info->reg.bal;
21 : 0 : cq->reg.len_mask = q_create_info->reg.len_mask;
22 : 0 : cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
23 : 0 : cq->reg.head_mask = q_create_info->reg.head_mask;
24 : : }
25 : :
26 : : /**
27 : : * idpf_ctlq_init_regs - Initialize control queue registers
28 : : * @hw: pointer to hw struct
29 : : * @cq: pointer to the specific Control queue
30 : : * @is_rxq: true if receive control queue, false otherwise
31 : : *
32 : : * Initialize registers. The caller is expected to have already initialized the
33 : : * descriptor ring memory and buffer memory
34 : : */
35 : 0 : static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
36 : : bool is_rxq)
37 : : {
38 : : /* Update tail to post pre-allocated buffers for rx queues */
39 [ # # ]: 0 : if (is_rxq)
40 : 0 : wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
41 : :
42 : : /* For non-Mailbox control queues only TAIL needs to be set */
43 [ # # ]: 0 : if (cq->q_id != -1)
44 : : return;
45 : :
46 : : /* Clear Head for both send or receive */
47 : 0 : wr32(hw, cq->reg.head, 0);
48 : :
49 : : /* set starting point */
50 : 0 : wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
51 : 0 : wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
52 : 0 : wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
53 : : }
54 : :
55 : : /**
56 : : * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buffers
57 : : * @cq: pointer to the specific Control queue
58 : : *
59 : : * Record the address of the receive queue DMA buffers in the descriptors.
60 : : * The buffers must have been previously allocated.
61 : : */
62 : 0 : static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
63 : : {
64 : : int i;
65 : :
66 [ # # ]: 0 : for (i = 0; i < cq->ring_size; i++) {
67 : 0 : struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
68 : 0 : struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
69 : :
70 : : /* No buffer to post to descriptor, continue */
71 [ # # ]: 0 : if (!bi)
72 : 0 : continue;
73 : :
74 : 0 : desc->flags =
75 : : CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
76 : 0 : desc->opcode = 0;
77 : 0 : desc->datalen = CPU_TO_LE16(bi->size);
78 : 0 : desc->ret_val = 0;
79 : 0 : desc->cookie_high = 0;
80 : 0 : desc->cookie_low = 0;
81 : 0 : desc->params.indirect.addr_high =
82 : 0 : CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
83 : 0 : desc->params.indirect.addr_low =
84 : 0 : CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
85 : 0 : desc->params.indirect.param0 = 0;
86 : 0 : desc->params.indirect.param1 = 0;
87 : : }
88 : 0 : }
89 : :
90 : : /**
91 : : * idpf_ctlq_shutdown - shutdown the CQ
92 : : * @hw: pointer to hw struct
93 : : * @cq: pointer to the specific Control queue
94 : : *
95 : : * The main shutdown routine for any control queue
96 : : */
97 : 0 : static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
98 : : {
99 : 0 : idpf_acquire_lock(&cq->cq_lock);
100 : :
101 : : #ifdef SIMICS_BUILD
102 : : wr32(hw, cq->reg.head, 0);
103 : : wr32(hw, cq->reg.tail, 0);
104 : : wr32(hw, cq->reg.len, 0);
105 : : wr32(hw, cq->reg.bal, 0);
106 : : wr32(hw, cq->reg.bah, 0);
107 : : #endif /* SIMICS_BUILD */
108 : :
109 : : /* free ring buffers and the ring itself */
110 : 0 : idpf_ctlq_dealloc_ring_res(hw, cq);
111 : :
112 : : /* Set ring_size to 0 to indicate uninitialized queue */
113 : 0 : cq->ring_size = 0;
114 : :
115 : : idpf_release_lock(&cq->cq_lock);
116 : : idpf_destroy_lock(&cq->cq_lock);
117 : 0 : }
118 : :
119 : : /**
120 : : * idpf_ctlq_add - add one control queue
121 : : * @hw: pointer to hardware struct
122 : : * @qinfo: info for queue to be created
123 : : * @cq_out: (output) double pointer to control queue to be created
124 : : *
125 : : * Allocate and initialize a control queue and add it to the control queue list.
126 : : * On success, the newly allocated and initialized queue is passed back to
127 : : * the caller through cq_out.
128 : : *
129 : : * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
130 : : */
131 : 0 : int idpf_ctlq_add(struct idpf_hw *hw,
132 : : struct idpf_ctlq_create_info *qinfo,
133 : : struct idpf_ctlq_info **cq_out)
134 : : {
135 : : struct idpf_ctlq_info *cq;
136 : : bool is_rxq = false;
137 : : int err;
138 : :
139 [ # # # # # # ]: 0 : if (!qinfo->len || !qinfo->buf_size ||
140 [ # # ]: 0 : qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
141 : : qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
142 : : return -EINVAL;
143 : :
144 : : cq = (struct idpf_ctlq_info *)
145 : 0 : idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
146 [ # # ]: 0 : if (!cq)
147 : : return -ENOMEM;
148 : :
149 : 0 : cq->cq_type = qinfo->type;
150 : 0 : cq->q_id = qinfo->id;
151 : 0 : cq->buf_size = qinfo->buf_size;
152 : 0 : cq->ring_size = qinfo->len;
153 : :
154 : 0 : cq->next_to_use = 0;
155 : 0 : cq->next_to_clean = 0;
156 : 0 : cq->next_to_post = cq->ring_size - 1;
157 : :
158 [ # # # ]: 0 : switch (qinfo->type) {
159 : 0 : case IDPF_CTLQ_TYPE_MAILBOX_RX:
160 : : is_rxq = true;
161 : : /* fallthrough */
162 : 0 : case IDPF_CTLQ_TYPE_MAILBOX_TX:
163 : 0 : err = idpf_ctlq_alloc_ring_res(hw, cq);
164 : : break;
165 : : default:
166 : : err = -EINVAL;
167 : : break;
168 : : }
169 : :
170 [ # # ]: 0 : if (err)
171 : 0 : goto init_free_q;
172 : :
173 [ # # ]: 0 : if (is_rxq) {
174 : 0 : idpf_ctlq_init_rxq_bufs(cq);
175 : : } else {
176 : : /* Allocate the array of msg pointers for TX queues */
177 : 0 : cq->bi.tx_msg = (struct idpf_ctlq_msg **)
178 : 0 : idpf_calloc(hw, qinfo->len,
179 : : sizeof(struct idpf_ctlq_msg *));
180 [ # # ]: 0 : if (!cq->bi.tx_msg) {
181 : : err = -ENOMEM;
182 : 0 : goto init_dealloc_q_mem;
183 : : }
184 : : }
185 : :
186 : : idpf_ctlq_setup_regs(cq, qinfo);
187 : :
188 : 0 : idpf_ctlq_init_regs(hw, cq, is_rxq);
189 : :
190 : : idpf_init_lock(&(cq->cq_lock));
191 : :
192 [ # # ]: 0 : LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
193 : :
194 : 0 : *cq_out = cq;
195 : 0 : return 0;
196 : :
197 : : init_dealloc_q_mem:
198 : : /* free ring buffers and the ring itself */
199 : 0 : idpf_ctlq_dealloc_ring_res(hw, cq);
200 : 0 : init_free_q:
201 : 0 : idpf_free(hw, cq);
202 : : cq = NULL;
203 : :
204 : 0 : return err;
205 : : }
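As a usage note, the pairing of idpf_ctlq_add() with idpf_ctlq_remove() can be sketched as below. This is an illustration only, not code from the driver; it assumes idpf_ctlq_init() has already set up hw->cq_list_head and that the caller has filled an idpf_ctlq_create_info (type, id, len, buf_size and the register block) as described for idpf_ctlq_init() further below.

/* Illustration only (not driver code): add one extra mailbox queue and
 * tear it down again.  qinfo is assumed to be filled in by the caller.
 */
static int example_ctlq_add_remove(struct idpf_hw *hw,
				   struct idpf_ctlq_create_info *qinfo)
{
	struct idpf_ctlq_info *cq = NULL;
	int err;

	err = idpf_ctlq_add(hw, qinfo, &cq);
	if (err)
		return err;	/* nothing is left allocated on failure */

	/* ... use cq with idpf_ctlq_send()/idpf_ctlq_recv() ... */

	/* Unlinks cq from hw->cq_list_head, frees the ring and cq itself */
	idpf_ctlq_remove(hw, cq);

	return 0;
}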
206 : :
207 : : /**
208 : : * idpf_ctlq_remove - deallocate and remove specified control queue
209 : : * @hw: pointer to hardware struct
210 : : * @cq: pointer to control queue to be removed
211 : : */
212 : 0 : void idpf_ctlq_remove(struct idpf_hw *hw,
213 : : struct idpf_ctlq_info *cq)
214 : : {
215 [ # # ]: 0 : LIST_REMOVE(cq, cq_list);
216 : 0 : idpf_ctlq_shutdown(hw, cq);
217 : 0 : idpf_free(hw, cq);
218 : 0 : }
219 : :
220 : : /**
221 : : * idpf_ctlq_init - main initialization routine for all control queues
222 : : * @hw: pointer to hardware struct
223 : : * @num_q: number of queues to initialize
224 : : * @q_info: array of structs containing info for each queue to be initialized
225 : : *
226 : : * This initializes any number and any type of control queues. This is an all
227 : : * or nothing routine; if one fails, all previously allocated queues will be
228 : : * destroyed. This must be called prior to using the individual add/remove
229 : : * APIs.
230 : : */
231 : 0 : int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
232 : : struct idpf_ctlq_create_info *q_info)
233 : : {
234 : 0 : struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
235 : : int err;
236 : : int i;
237 : :
238 : 0 : LIST_INIT(&hw->cq_list_head);
239 : :
240 [ # # ]: 0 : for (i = 0; i < num_q; i++) {
241 : 0 : struct idpf_ctlq_create_info *qinfo = q_info + i;
242 : :
243 : 0 : err = idpf_ctlq_add(hw, qinfo, &cq);
244 [ # # ]: 0 : if (err)
245 : 0 : goto init_destroy_qs;
246 : : }
247 : :
248 : : return 0;
249 : :
250 : : init_destroy_qs:
251 [ # # ]: 0 : LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
252 : : idpf_ctlq_info, cq_list)
253 : 0 : idpf_ctlq_remove(hw, cq);
254 : :
255 : : return err;
256 : : }
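For orientation, here is a hedged sketch of how a caller might bring up the default mailbox TX/RX pair through idpf_ctlq_init(). It is not driver code: the ring length, buffer size and register offsets are placeholders (real values come from the device register map, bounded by the limits in idpf_controlq.h), and the q_id of -1 follows the q_id != -1 check in idpf_ctlq_init_regs() above.

/* Illustration only (not driver code): create the default mailbox pair.
 * Register offsets/masks are placeholders for the device-specific values.
 */
static int example_mbx_setup(struct idpf_hw *hw)
{
	struct idpf_ctlq_create_info qinfo[2] = {0};

	qinfo[0].type = IDPF_CTLQ_TYPE_MAILBOX_TX;
	qinfo[0].id = -1;		/* default mailbox, see idpf_ctlq_init_regs() */
	qinfo[0].len = 64;		/* must be <= IDPF_CTLQ_MAX_RING_SIZE */
	qinfo[0].buf_size = 4096;	/* must be <= IDPF_CTLQ_MAX_BUF_LEN */
	/* qinfo[0].reg.head/tail/len/bah/bal/masks = TX mailbox registers */

	qinfo[1] = qinfo[0];
	qinfo[1].type = IDPF_CTLQ_TYPE_MAILBOX_RX;
	/* qinfo[1].reg.head/tail/len/bah/bal/masks = RX mailbox registers */

	/* All-or-nothing: on error every queue created so far is destroyed */
	return idpf_ctlq_init(hw, 2, qinfo);
}

The matching teardown is a single idpf_ctlq_deinit(hw), which walks hw->cq_list_head and removes every queue.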
257 : :
258 : : /**
259 : : * idpf_ctlq_deinit - destroy all control queues
260 : : * @hw: pointer to hw struct
261 : : */
262 : 0 : void idpf_ctlq_deinit(struct idpf_hw *hw)
263 : : {
264 : : struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
265 : :
266 [ # # ]: 0 : LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
267 : : idpf_ctlq_info, cq_list)
268 : 0 : idpf_ctlq_remove(hw, cq);
269 : 0 : }
270 : :
271 : : /**
272 : : * idpf_ctlq_send - send command to Control Queue (CTQ)
273 : : * @hw: pointer to hw struct
274 : : * @cq: handle to control queue struct to send on
275 : : * @num_q_msg: number of messages to send on control queue
276 : : * @q_msg: pointer to array of queue messages to be sent
277 : : *
278 : : * The caller is expected to allocate DMAable buffers and pass them to the
279 : : * send routine via the q_msg struct / control queue specific data struct.
280 : : * The control queue will hold a reference to each send message until
281 : : * the completion for that message has been cleaned.
282 : : * Since all q_msgs being sent are stored in native endianness, these values
283 : : * must be converted to LE before being written to the hw descriptor.
284 : : */
285 : 0 : int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
286 : : u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
287 : : {
288 : : struct idpf_ctlq_desc *desc;
289 : : int num_desc_avail;
290 : : int err = 0;
291 : : int i;
292 : :
293 [ # # # # ]: 0 : if (!cq || !cq->ring_size)
294 : : return -ENOBUFS;
295 : :
296 : 0 : idpf_acquire_lock(&cq->cq_lock);
297 : :
298 : : /* Ensure there are enough descriptors to send all messages */
299 [ # # ]: 0 : num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
300 [ # # # # ]: 0 : if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
301 : : err = -ENOSPC;
302 : 0 : goto err_unlock;
303 : : }
304 : :
305 [ # # ]: 0 : for (i = 0; i < num_q_msg; i++) {
306 : 0 : struct idpf_ctlq_msg *msg = &q_msg[i];
307 : :
308 : 0 : desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
309 : :
310 : 0 : desc->opcode = CPU_TO_LE16(msg->opcode);
311 : 0 : desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
312 : :
313 : 0 : desc->cookie_high = CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
314 : 0 : desc->cookie_low = CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
315 : :
316 : 0 : desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
317 : : IDPF_CTLQ_FLAG_HOST_ID_S);
318 [ # # ]: 0 : if (msg->data_len) {
319 : 0 : struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
320 : :
321 : 0 : desc->datalen |= CPU_TO_LE16(msg->data_len);
322 : 0 : desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
323 : 0 : desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
324 : :
325 : : /* Update the address values in the desc with the pa
326 : : * value for respective buffer
327 : : */
328 : 0 : desc->params.indirect.addr_high =
329 : 0 : CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
330 : 0 : desc->params.indirect.addr_low =
331 : 0 : CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
332 : :
333 [ # # ]: 0 : idpf_memcpy(&desc->params, msg->ctx.indirect.context,
334 : : IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
335 : : #ifdef SIMICS_BUILD
336 : : /* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
337 : : * needs to set the peer PF function id in param0 for Simics
338 : : */
339 : : if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
340 : : desc->params.indirect.param0 =
341 : : CPU_TO_LE32(msg->func_id);
342 : : }
343 : : #endif
344 : : } else {
345 [ # # ]: 0 : idpf_memcpy(&desc->params, msg->ctx.direct,
346 : : IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
347 : : #ifdef SIMICS_BUILD
348 : : /* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
349 : : * needs to set the peer PF function id in param0 for Simics
350 : : */
351 : : if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
352 : : desc->params.direct.param0 =
353 : : CPU_TO_LE32(msg->func_id);
354 : : }
355 : : #endif
356 : : }
357 : :
358 : : /* Store buffer info */
359 : 0 : cq->bi.tx_msg[cq->next_to_use] = msg;
360 : :
361 : 0 : (cq->next_to_use)++;
362 [ # # ]: 0 : if (cq->next_to_use == cq->ring_size)
363 : 0 : cq->next_to_use = 0;
364 : : }
365 : :
366 : : /* Force memory write to complete before letting hardware
367 : : * know that there are new descriptors to fetch.
368 : : */
369 : 0 : idpf_wmb();
370 : :
371 : 0 : wr32(hw, cq->reg.tail, cq->next_to_use);
372 : :
373 : 0 : err_unlock:
374 : : idpf_release_lock(&cq->cq_lock);
375 : :
376 : 0 : return err;
377 : : }
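To make the ownership rules concrete, below is a minimal, hypothetical sketch of posting one direct (payload-less) message. It is not driver code; the mailbox opcode idpf_mbq_opc_send_msg_to_pf is the one referenced in the SIMICS block above, and the virtchnl opcode placed in the cookie is a placeholder. Because the queue stores a pointer to the message until it is cleaned, the idpf_ctlq_msg is caller-owned and must stay valid until idpf_ctlq_clean_sq() hands it back.

/* Illustration only (not driver code): send one direct command.  msg is
 * caller-owned storage that must outlive the later clean_sq call.
 */
static int example_send_direct(struct idpf_hw *hw, struct idpf_ctlq_info *tx_cq,
			       struct idpf_ctlq_msg *msg)
{
	msg->opcode = idpf_mbq_opc_send_msg_to_pf;
	msg->func_id = 0;			/* target function id */
	msg->host_id = 0;
	msg->data_len = 0;			/* direct command: no DMA buffer */
	msg->cookie.mbx.chnl_opcode = 0;	/* placeholder virtchnl opcode */
	msg->cookie.mbx.chnl_retval = 0;
	/* msg->ctx.direct carries the remaining descriptor bytes; zero or
	 * fill as required by the command being sent.
	 */

	return idpf_ctlq_send(hw, tx_cq, 1, msg);
}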
378 : :
379 : : /**
380 : : * __idpf_ctlq_clean_sq - helper function to reclaim descriptors on HW write
381 : : * back for the requested queue
382 : : * @cq: pointer to the specific Control queue
383 : : * @clean_count: (input|output) number of descriptors to clean as input, and
384 : : * number of descriptors actually cleaned as output
385 : : * @msg_status: (output) pointer to msg pointer array to be populated; needs
386 : : * to be allocated by caller
387 : : * @force: (input) clean descriptors which were not done yet. Use with caution
388 : : * in kernel mode only
389 : : *
390 : : * Returns an array of message pointers associated with the cleaned
391 : : * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
392 : : * descriptors. The status will be returned for each; any messages that failed
393 : : * to send will have a non-zero status. The caller is expected to free original
394 : : * ctlq_msgs and free or reuse the DMA buffers.
395 : : */
396 : 0 : static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
397 : : struct idpf_ctlq_msg *msg_status[], bool force)
398 : : {
399 : : struct idpf_ctlq_desc *desc;
400 : : u16 i, num_to_clean;
401 : : u16 ntc, desc_err;
402 : :
403 [ # # # # ]: 0 : if (!cq || !cq->ring_size)
404 : : return -ENOBUFS;
405 : :
406 [ # # ]: 0 : if (*clean_count == 0)
407 : : return 0;
408 [ # # ]: 0 : if (*clean_count > cq->ring_size)
409 : : return -EINVAL;
410 : :
411 : 0 : idpf_acquire_lock(&cq->cq_lock);
412 : :
413 : 0 : ntc = cq->next_to_clean;
414 : :
415 : 0 : num_to_clean = *clean_count;
416 : :
417 [ # # ]: 0 : for (i = 0; i < num_to_clean; i++) {
418 : : /* Fetch next descriptor and check if marked as done */
419 : 0 : desc = IDPF_CTLQ_DESC(cq, ntc);
420 [ # # # # ]: 0 : if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
421 : : break;
422 : :
423 : : /* strip off FW internal code */
424 : 0 : desc_err = LE16_TO_CPU(desc->ret_val) & 0xff;
425 : :
426 : 0 : msg_status[i] = cq->bi.tx_msg[ntc];
427 [ # # ]: 0 : if (!msg_status[i])
428 : : break;
429 : 0 : msg_status[i]->status = desc_err;
430 : :
431 [ # # ]: 0 : cq->bi.tx_msg[ntc] = NULL;
432 : :
433 : : /* Zero out any stale data */
434 : : idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
435 : :
436 : 0 : ntc++;
437 [ # # ]: 0 : if (ntc == cq->ring_size)
438 : : ntc = 0;
439 : : }
440 : :
441 : 0 : cq->next_to_clean = ntc;
442 : :
443 : : idpf_release_lock(&cq->cq_lock);
444 : :
445 : : /* Return number of descriptors actually cleaned */
446 : 0 : *clean_count = i;
447 : :
448 : 0 : return 0;
449 : : }
450 : :
451 : : /**
452 : : * idpf_ctlq_clean_sq_force - reclaim all descriptors on HW write back for the
453 : : * requested queue. Use only in kernel mode.
454 : : * @cq: pointer to the specific Control queue
455 : : * @clean_count: (input|output) number of descriptors to clean as input, and
456 : : * number of descriptors actually cleaned as output
457 : : * @msg_status: (output) pointer to msg pointer array to be populated; needs
458 : : * to be allocated by caller
459 : : *
460 : : * Returns an array of message pointers associated with the cleaned
461 : : * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
462 : : * descriptors. The status will be returned for each; any messages that failed
463 : : * to send will have a non-zero status. The caller is expected to free original
464 : : * ctlq_msgs and free or reuse the DMA buffers.
465 : : */
466 : 0 : int idpf_ctlq_clean_sq_force(struct idpf_ctlq_info *cq, u16 *clean_count,
467 : : struct idpf_ctlq_msg *msg_status[])
468 : : {
469 : 0 : return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, true);
470 : : }
471 : :
472 : : /**
473 : : * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
474 : : * requested queue
475 : : * @cq: pointer to the specific Control queue
476 : : * @clean_count: (input|output) number of descriptors to clean as input, and
477 : : * number of descriptors actually cleaned as output
478 : : * @msg_status: (output) pointer to msg pointer array to be populated; needs
479 : : * to be allocated by caller
480 : : *
481 : : * Returns an array of message pointers associated with the cleaned
482 : : * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
483 : : * descriptors. The status will be returned for each; any messages that failed
484 : : * to send will have a non-zero status. The caller is expected to free original
485 : : * ctlq_msgs and free or reuse the DMA buffers.
486 : : */
487 : 0 : int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
488 : : struct idpf_ctlq_msg *msg_status[])
489 : : {
490 : 0 : return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, false);
491 : : }
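A short, hedged sketch of the cleaning side follows; it is not driver code. The msg_status array is caller-allocated, *clean_count is in/out, and the same pattern applies to idpf_ctlq_clean_sq_force(), which additionally reclaims descriptors the hardware has not marked done.

/* Illustration only (not driver code): reclaim completed send descriptors
 * and count how many messages the firmware rejected.
 */
static u16 example_clean_tx(struct idpf_ctlq_info *tx_cq)
{
	struct idpf_ctlq_msg *done[16];
	u16 num = 16;	/* in: descriptors to clean; out: actually cleaned */
	u16 i, failed = 0;

	if (idpf_ctlq_clean_sq(tx_cq, &num, done))
		return 0;

	for (i = 0; i < num; i++) {
		/* Non-zero status is the stripped FW return code for a failed
		 * send.  Either way, ownership of done[i] and of any DMA
		 * payload it referenced returns to the caller here.
		 */
		if (done[i]->status)
			failed++;
	}

	return failed;
}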
492 : :
493 : : /**
494 : : * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
495 : : * @hw: pointer to hw struct
496 : : * @cq: pointer to control queue handle
497 : : * @buff_count: (input|output) input is number of buffers caller is trying to
498 : : * return; output is number of buffers that were not posted
499 : : * @buffs: array of pointers to dma mem structs to be given to hardware
500 : : *
501 : : * Caller uses this function to return DMA buffers to the descriptor ring after
502 : : * consuming them; on input, buff_count is the number of buffers being returned.
503 : : *
504 : : * Note: this function needs to be called after a receive call even
505 : : * if there are no DMA buffers to be returned, i.e. buff_count = 0,
506 : : * buffs = NULL to support direct commands
507 : : */
508 : 0 : int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
509 : : u16 *buff_count, struct idpf_dma_mem **buffs)
510 : : {
511 : : struct idpf_ctlq_desc *desc;
512 : 0 : u16 ntp = cq->next_to_post;
513 : : bool buffs_avail = false;
514 : 0 : u16 tbp = ntp + 1;
515 : : int i = 0;
516 : :
517 [ # # ]: 0 : if (*buff_count > cq->ring_size)
518 : : return -EINVAL;
519 : :
520 [ # # ]: 0 : if (*buff_count > 0)
521 : : buffs_avail = true;
522 : :
523 : 0 : idpf_acquire_lock(&cq->cq_lock);
524 : :
525 [ # # ]: 0 : if (tbp >= cq->ring_size)
526 : : tbp = 0;
527 : :
528 [ # # ]: 0 : if (tbp == cq->next_to_clean)
529 : : /* Nothing to do */
530 : 0 : goto post_buffs_out;
531 : :
532 : : /* Post buffers for as many as provided or up until the last one used */
533 [ # # ]: 0 : while (ntp != cq->next_to_clean) {
534 : 0 : desc = IDPF_CTLQ_DESC(cq, ntp);
535 : :
536 [ # # ]: 0 : if (cq->bi.rx_buff[ntp])
537 : 0 : goto fill_desc;
538 [ # # ]: 0 : if (!buffs_avail) {
539 : : /* If the caller hasn't given us any buffers or
540 : : * there are none left, search the ring itself
541 : : * for an available buffer to move to this
542 : : * entry starting at the next entry in the ring
543 : : */
544 : 0 : tbp = ntp + 1;
545 : :
546 : : /* Wrap ring if necessary */
547 [ # # ]: 0 : if (tbp >= cq->ring_size)
548 : : tbp = 0;
549 : :
550 [ # # ]: 0 : while (tbp != cq->next_to_clean) {
551 [ # # ]: 0 : if (cq->bi.rx_buff[tbp]) {
552 : 0 : cq->bi.rx_buff[ntp] =
553 : : cq->bi.rx_buff[tbp];
554 : 0 : cq->bi.rx_buff[tbp] = NULL;
555 : :
556 : : /* Found a buffer, no need to
557 : : * search anymore
558 : : */
559 : 0 : break;
560 : : }
561 : :
562 : : /* Wrap ring if necessary */
563 : 0 : tbp++;
564 [ # # ]: 0 : if (tbp >= cq->ring_size)
565 : : tbp = 0;
566 : : }
567 : :
568 [ # # ]: 0 : if (tbp == cq->next_to_clean)
569 : 0 : goto post_buffs_out;
570 : : } else {
571 : : /* Give back pointer to DMA buffer */
572 : 0 : cq->bi.rx_buff[ntp] = buffs[i];
573 : 0 : i++;
574 : :
575 [ # # ]: 0 : if (i >= *buff_count)
576 : : buffs_avail = false;
577 : : }
578 : :
579 : 0 : fill_desc:
580 : 0 : desc->flags =
581 : : CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
582 : :
583 : : /* Post buffers to descriptor */
584 : 0 : desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
585 : 0 : desc->params.indirect.addr_high =
586 : 0 : CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
587 : 0 : desc->params.indirect.addr_low =
588 : 0 : CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
589 : :
590 : 0 : ntp++;
591 [ # # ]: 0 : if (ntp == cq->ring_size)
592 : : ntp = 0;
593 : : }
594 : :
595 : 0 : post_buffs_out:
596 : : /* Only update tail if buffers were actually posted */
597 [ # # ]: 0 : if (cq->next_to_post != ntp) {
598 [ # # ]: 0 : if (ntp)
599 : : /* Update next_to_post to ntp - 1 since current ntp
600 : : * will not have a buffer
601 : : */
602 : 0 : cq->next_to_post = ntp - 1;
603 : : else
604 : : /* Wrap to end of ring since current ntp is 0 */
605 : 0 : cq->next_to_post = cq->ring_size - 1;
606 : :
607 : 0 : idpf_wmb();
608 : :
609 : 0 : wr32(hw, cq->reg.tail, cq->next_to_post);
610 : : }
611 : :
612 : : idpf_release_lock(&cq->cq_lock);
613 : :
614 : : /* return the number of buffers that were not posted */
615 : 0 : *buff_count = *buff_count - i;
616 : :
617 : 0 : return 0;
618 : : }
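The buffer-return path can be sketched as follows; this is an illustration, not driver code. It assumes the caller collected the idpf_dma_mem pointers it received from idpf_ctlq_recv(); per the note above, the call must also be made with a zero count (and NULL array) after receiving direct messages so the descriptors are re-armed.

/* Illustration only (not driver code): hand consumed RX buffers back to
 * the ring.  The return value is how many trailing entries of bufs[]
 * were not placed on the ring and are still owned by the caller.
 */
static u16 example_return_rx_bufs(struct idpf_hw *hw,
				  struct idpf_ctlq_info *rx_cq,
				  struct idpf_dma_mem **bufs, u16 count)
{
	u16 unposted = count;

	if (idpf_ctlq_post_rx_buffs(hw, rx_cq, &unposted, bufs))
		return count;	/* nothing was posted */

	return unposted;
}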
619 : :
620 : : /**
621 : : * idpf_ctlq_recv - receive control queue message call back
622 : : * @cq: pointer to control queue handle to receive on
623 : : * @num_q_msg: (input|output) input number of messages that should be received;
624 : : * output number of messages actually received
625 : : * @q_msg: (output) array of received control queue messages on this q;
626 : : * needs to be pre-allocated by caller for as many messages as requested
627 : : *
628 : : * Called by interrupt handler or polling mechanism. Caller is expected
629 : : * to free buffers
630 : : */
631 : 0 : int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
632 : : struct idpf_ctlq_msg *q_msg)
633 : : {
634 : : u16 num_to_clean, ntc, ret_val, flags;
635 : : struct idpf_ctlq_desc *desc;
636 : : int err = 0;
637 : : u16 i;
638 : :
639 [ # # # # ]: 0 : if (!cq || !cq->ring_size)
640 : : return -ENOBUFS;
641 : :
642 [ # # ]: 0 : if (*num_q_msg == 0)
643 : : return 0;
644 [ # # ]: 0 : else if (*num_q_msg > cq->ring_size)
645 : : return -EINVAL;
646 : :
647 : : /* take the lock before we start messing with the ring */
648 : 0 : idpf_acquire_lock(&cq->cq_lock);
649 : :
650 : 0 : ntc = cq->next_to_clean;
651 : :
652 : 0 : num_to_clean = *num_q_msg;
653 : :
654 [ # # ]: 0 : for (i = 0; i < num_to_clean; i++) {
655 : : /* Fetch next descriptor and check if marked as done */
656 : 0 : desc = IDPF_CTLQ_DESC(cq, ntc);
657 : 0 : flags = LE16_TO_CPU(desc->flags);
658 : :
659 [ # # ]: 0 : if (!(flags & IDPF_CTLQ_FLAG_DD))
660 : : break;
661 : :
662 : 0 : ret_val = LE16_TO_CPU(desc->ret_val);
663 : :
664 : 0 : q_msg[i].vmvf_type = (flags &
665 : : (IDPF_CTLQ_FLAG_FTYPE_VM |
666 : 0 : IDPF_CTLQ_FLAG_FTYPE_PF)) >>
667 : : IDPF_CTLQ_FLAG_FTYPE_S;
668 : :
669 [ # # ]: 0 : if (flags & IDPF_CTLQ_FLAG_ERR)
670 : : err = -EBADMSG;
671 : :
672 : 0 : q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
673 : 0 : q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
674 : :
675 : 0 : q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
676 : 0 : q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
677 : 0 : q_msg[i].status = ret_val;
678 : :
679 [ # # ]: 0 : if (desc->datalen) {
680 [ # # ]: 0 : idpf_memcpy(q_msg[i].ctx.indirect.context,
681 : : &desc->params.indirect,
682 : : IDPF_INDIRECT_CTX_SIZE,
683 : : IDPF_DMA_TO_NONDMA);
684 : :
685 : : /* Assign pointer to dma buffer to ctlq_msg array
686 : : * to be given to upper layer
687 : : */
688 : 0 : q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
689 : :
690 : : /* Zero out pointer to DMA buffer info;
691 : : * will be repopulated by post buffers API
692 : : */
693 : 0 : cq->bi.rx_buff[ntc] = NULL;
694 : : } else {
695 [ # # ]: 0 : idpf_memcpy(q_msg[i].ctx.direct,
696 : : desc->params.raw,
697 : : IDPF_DIRECT_CTX_SIZE,
698 : : IDPF_DMA_TO_NONDMA);
699 : : }
700 : :
701 : : /* Zero out stale data in descriptor */
702 : : idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
703 : : IDPF_DMA_MEM);
704 : :
705 : 0 : ntc++;
706 [ # # ]: 0 : if (ntc == cq->ring_size)
707 : : ntc = 0;
708 : : }
709 : :
710 : 0 : cq->next_to_clean = ntc;
711 : :
712 : : idpf_release_lock(&cq->cq_lock);
713 : :
714 : 0 : *num_q_msg = i;
715 [ # # ]: 0 : if (*num_q_msg == 0)
716 : : err = -ENOMSG;
717 : :
718 : : return err;
719 : : }
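Putting the receive side together, here is a minimal polling sketch (illustration only, not driver code). It treats -EBADMSG as "some messages carried the ERR flag" per the code above, treats -ENOMSG as "nothing pending", and recycles the consumed payload buffers through idpf_ctlq_post_rx_buffs() as required by that function's note.

/* Illustration only (not driver code): poll the RX mailbox, skim the
 * received messages and give their DMA buffers back to the ring.
 */
static void example_poll_mbx(struct idpf_hw *hw, struct idpf_ctlq_info *rx_cq)
{
	struct idpf_ctlq_msg msgs[4];
	struct idpf_dma_mem *bufs[4];
	u16 num = 4;	/* in: max messages wanted; out: actually received */
	u16 nbuf = 0;
	u16 i;
	int err;

	err = idpf_ctlq_recv(rx_cq, &num, msgs);
	if (err && err != -EBADMSG)
		return;		/* -ENOMSG: nothing pending; other: give up */

	for (i = 0; i < num; i++) {
		/* msgs[i].cookie.mbx.chnl_opcode/chnl_retval identify the
		 * event; for indirect messages ctx.indirect.payload points
		 * at the DMA buffer holding the body.  Processing omitted.
		 */
		if (msgs[i].data_len)
			bufs[nbuf++] = msgs[i].ctx.indirect.payload;
	}

	/* Re-arm the descriptors; required even when nbuf == 0 */
	idpf_ctlq_post_rx_buffs(hw, rx_cq, &nbuf, nbuf ? bufs : NULL);
}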