/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2023 Intel Corporation
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
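
/*
 * The prefix argument is token-pasted onto the register names, so for
 * example ICE_CQ_INIT_REGS(cq, PF_FW) expands to assignments such as:
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->rq.tail = PF_FW_ARQT;
 *
 * which is how ice_adminq_init_regs() below selects the PF firmware
 * admin queue register set.
 */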

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_SB);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the control queue design; there
		 * is no register for buffer size configuration.
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	cq->rq.r.rq_bi = NULL;
	ice_free(hw, cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	cq->sq.r.sq_bi = NULL;
	ice_free(hw, cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_cfg_cq_regs - setup control queue registers
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Program the head/tail, length, and base address registers for a control
 * queue ring, then read back the low base address register to verify that
 * the configuration was applied.
 */
static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event) queue
 */
static int
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
	}								\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)
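
/*
 * For example, ICE_FREE_CQ_BUFS(hw, cq, sq) walks cq->sq.r.sq_bi[], frees
 * every DMA buffer that still has a physical address, and then releases
 * the cq->sq.dma_head array backing those buffer-info entries.
 */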

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize receive side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the receive side of a control queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the transmit side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static int
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop processing of the control queue */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
	u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);

	if (hw->api_maj_ver > exp_fw_api_ver_major) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
		if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
			ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
		else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
			ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
				 hw->api_maj_ver, hw->api_min_ver,
				 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
			 hw->api_maj_ver, hw->api_min_ver,
			 exp_fw_api_ver_major, exp_fw_api_ver_minor);
	}
	return true;
}
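
/*
 * Concretely: with an expected API version of, say, 1.5, minor versions
 * 1.3 through 1.7 load silently, minors outside that +/-2 window log an
 * informational message, and only a major version above the expected one
 * refuses to load.
 */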

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static int
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	int ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
static bool ice_is_sbq_supported(struct ice_hw *hw)
{
	return ice_is_generic_mac(hw);
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void
ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
		   bool unloading)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, unloading);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
int ice_init_all_ctrlq(struct ice_hw *hw)
{
	u32 retry = 0;
	int status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* The sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize it if supported; otherwise callers fall back to
	 * the admin queue interface.
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
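
/*
 * A minimal lifecycle sketch (illustrative only; the ICE_EXAMPLE_* sizing
 * constants are hypothetical placeholders for values the driver chooses):
 *
 *	hw->adminq.num_sq_entries = ICE_EXAMPLE_SQ_LEN;
 *	hw->adminq.num_rq_entries = ICE_EXAMPLE_RQ_LEN;
 *	hw->adminq.sq_buf_size = ICE_EXAMPLE_BUF_SZ;
 *	hw->adminq.rq_buf_size = ICE_EXAMPLE_BUF_SZ;
 *	(likewise for hw->mailboxq, and hw->sbq where supported)
 *
 *	ice_create_all_ctrlq(hw);          once, at driver load
 *	ice_shutdown_all_ctrlq(hw, false); at runtime, e.g. around a reset
 *	ice_init_all_ctrlq(hw);            restart after the reset
 *	ice_destroy_all_ctrlq(hw);         once, at driver unload
 */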

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans the send side of a control queue
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free descriptors
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_aq_desc *desc;
	u32 head;

	desc = ICE_CTL_Q_DESC(*sq, ntc);

	head = rd32(hw, sq->head);
	if (head >= sq->count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Read head value (%d) exceeds allowed range.\n",
			  head);
		return 0;
	}

	while (head != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n",
			  ntc, head);
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);

		head = rd32(hw, sq->head);
		if (head >= sq->count) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Read head value (%d) exceeds allowed range.\n",
				  head);
			return 0;
		}
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
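
/*
 * Note that cleaning relies solely on the hardware head pointer: every
 * descriptor between next_to_clean and head has already been consumed by
 * firmware, so it is zeroed and handed back to the ring.
 */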

/**
 * ice_ctl_q_str - Convert control queue type to string
 * @qtype: the control queue type
 *
 * Returns: A string name for the given control queue type.
 */
static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
{
	switch (qtype) {
	case ICE_CTL_Q_UNKNOWN:
		return "Unknown CQ";
	case ICE_CTL_Q_ADMIN:
		return "AQ";
	case ICE_CTL_Q_MAILBOX:
		return "MBXQ";
	case ICE_CTL_Q_SB:
		return "SBQ";
	default:
		return "Unrecognized CQ";
	}
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 * @response: true if this is the writeback response
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void
ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
	     void *desc, void *buf, u16 buf_len, bool response)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) it is either a response
	 * indicated by the DD and/or CMP flag set or a command with the RD
	 * flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if the last send on a control queue has completed
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns: true if all the descriptors on the send side of a control queue
 * are finished processing, false otherwise.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* control queue designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to a control queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for a control queue. It prepares the
 * command into a descriptor, bumps the send queue tail, waits for the command
 * to complete, captures status and data for the command, etc.
 */
int
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u32 total_delay = 0;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait a short time before initial ice_sq_done() check, to allow
	 * hardware time for completion.
	 */
	ice_usec_delay(5, false);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(10, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
	ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);

	/* save writeback AQ if requested */
	if (cd && cd->wb_desc)
		ice_memcpy(cd->wb_desc, desc_on_ring,
			   sizeof(*cd->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}
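
/*
 * Timing note: after the initial 5 us settling delay, completion is polled
 * every 10 us, so cq->sq_cmd_timeout (ICE_CTL_Q_SQ_CMD_TIMEOUT) bounds the
 * wait at roughly 10 us times that count before the command is reported as
 * timed out or, if a critical mask bit is set, as a critical FW error.
 */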

/**
 * ice_sq_send_cmd - send command to a control queue
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Main command for the transmit side of a control queue. It puts the command
 * on the queue, bumps the tail, waits for processing of the command, captures
 * command status and results, etc.
 */
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	int status = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}
1176 : : /**
1177 : : * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1178 : : * @desc: pointer to the temp descriptor (non DMA mem)
1179 : : * @opcode: the opcode can be used to decide which flags to turn off or on
1180 : : *
1181 : : * Fill the desc with default values
1182 : : */
1183 : 0 : void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1184 : : {
1185 : : /* zero out the desc */
1186 : : ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
1187 : 0 : desc->opcode = CPU_TO_LE16(opcode);
1188 : 0 : desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
1189 : 0 : }
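
/*
 * A minimal usage sketch for a direct (bufferless) command, assuming the
 * opcode constant ice_aqc_opc_get_ver is available from ice_adminq_cmd.h:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * An indirect command passes a buffer and its size instead of NULL/0;
 * ice_sq_send_cmd_nolock() then sets ICE_AQ_FLAG_BUF (and ICE_AQ_FLAG_LB
 * for large buffers) on the descriptor automatically.
 */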

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * Clean one element from the receive side of a control queue. On return 'e'
 * contains contents of the message, and 'pending' contains the number of
 * events left to process.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
	ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
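
/*
 * A minimal receive-polling sketch, assuming the caller owns a scratch
 * buffer 'buf' (a hypothetical placeholder) for event payloads:
 *
 *	struct ice_rq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = sizeof(buf);
 *	event.msg_buf = buf;
 *	do {
 *		if (ice_clean_rq_elem(hw, &hw->adminq, &event, &pending))
 *			break;
 *		... consume event.msg_len bytes of event.msg_buf ...
 *	} while (pending);
 *
 * ice_clean_rq_elem() returns ICE_ERR_AQ_NO_WORK once the ring is drained,
 * and 'pending' reports how many further events are already queued.
 */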