drivers/net/ethernet/intel/ice/ice_controlq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 #include "ice_common.h"
5
6 #define ICE_CQ_INIT_REGS(qinfo, prefix)                         \
7 do {                                                            \
8         (qinfo)->sq.head = prefix##_ATQH;                       \
9         (qinfo)->sq.tail = prefix##_ATQT;                       \
10         (qinfo)->sq.len = prefix##_ATQLEN;                      \
11         (qinfo)->sq.bah = prefix##_ATQBAH;                      \
12         (qinfo)->sq.bal = prefix##_ATQBAL;                      \
13         (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;        \
14         (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
15         (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;  \
16         (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;           \
17         (qinfo)->rq.head = prefix##_ARQH;                       \
18         (qinfo)->rq.tail = prefix##_ARQT;                       \
19         (qinfo)->rq.len = prefix##_ARQLEN;                      \
20         (qinfo)->rq.bah = prefix##_ARQBAH;                      \
21         (qinfo)->rq.bal = prefix##_ARQBAL;                      \
22         (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;        \
23         (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
24         (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;  \
25         (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;           \
26 } while (0)
27
28 /**
29  * ice_adminq_init_regs - Initialize AdminQ registers
30  * @hw: pointer to the hardware structure
31  *
32  * This assumes the alloc_sq and alloc_rq functions have already been called
33  */
34 static void ice_adminq_init_regs(struct ice_hw *hw)
35 {
36         struct ice_ctl_q_info *cq = &hw->adminq;
37
38         ICE_CQ_INIT_REGS(cq, PF_FW);
39 }
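/*
 * Editorial illustration (not upstream text): with the PF_FW prefix, the
 * ICE_CQ_INIT_REGS() invocation above pastes the prefix onto the register
 * names, so the send-queue side effectively becomes:
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len  = PF_FW_ATQLEN;
 *	cq->sq.bah  = PF_FW_ATQBAH;
 *	cq->sq.bal  = PF_FW_ATQBAL;
 *
 * together with the matching length/enable/critical masks, while the
 * receive-queue side is filled in from the PF_FW_ARQ* registers in the
 * same way.
 */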
40
41 /**
42  * ice_mailbox_init_regs - Initialize Mailbox registers
43  * @hw: pointer to the hardware structure
44  *
45  * This assumes the alloc_sq and alloc_rq functions have already been called
46  */
47 static void ice_mailbox_init_regs(struct ice_hw *hw)
48 {
49         struct ice_ctl_q_info *cq = &hw->mailboxq;
50
51         ICE_CQ_INIT_REGS(cq, PF_MBX);
52 }
53
54 /**
55  * ice_sb_init_regs - Initialize Sideband registers
56  * @hw: pointer to the hardware structure
57  *
58  * This assumes the alloc_sq and alloc_rq functions have already been called
59  */
60 static void ice_sb_init_regs(struct ice_hw *hw)
61 {
62         struct ice_ctl_q_info *cq = &hw->sbq;
63
64         ICE_CQ_INIT_REGS(cq, PF_SB);
65 }
66
67 /**
68  * ice_check_sq_alive - check if the send queue is alive
69  * @hw: pointer to the HW struct
70  * @cq: pointer to the specific Control queue
71  *
72  * Returns true if the send queue is enabled, false otherwise.
73  */
74 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
75 {
76         /* check both queue-length and queue-enable fields */
77         if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
78                 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
79                                                 cq->sq.len_ena_mask)) ==
80                         (cq->num_sq_entries | cq->sq.len_ena_mask);
81
82         return false;
83 }
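/*
 * Editorial note: the check above passes only while the send queue length
 * register still holds exactly what ice_cfg_cq_regs() programmed, i.e. the
 * configured entry count in the length field with the enable bit set. If,
 * for example, a reset clears the register, the comparison fails and the
 * queue is reported as not alive.
 */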
84
85 /**
86  * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
87  * @hw: pointer to the hardware structure
88  * @cq: pointer to the specific Control queue
89  */
90 static int
91 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
92 {
93         size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
94
95         cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
96                                                  &cq->sq.desc_buf.pa,
97                                                  GFP_KERNEL | __GFP_ZERO);
98         if (!cq->sq.desc_buf.va)
99                 return -ENOMEM;
100         cq->sq.desc_buf.size = size;
101
102         return 0;
103 }
104
105 /**
106  * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
107  * @hw: pointer to the hardware structure
108  * @cq: pointer to the specific Control queue
109  */
110 static int
111 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
112 {
113         size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
114
115         cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
116                                                  &cq->rq.desc_buf.pa,
117                                                  GFP_KERNEL | __GFP_ZERO);
118         if (!cq->rq.desc_buf.va)
119                 return -ENOMEM;
120         cq->rq.desc_buf.size = size;
121         return 0;
122 }
123
124 /**
125  * ice_free_cq_ring - Free control queue ring
126  * @hw: pointer to the hardware structure
127  * @ring: pointer to the specific control queue ring
128  *
129  * This assumes the posted buffers have already been cleaned
130  * and de-allocated
131  */
132 static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
133 {
134         dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
135                            ring->desc_buf.va, ring->desc_buf.pa);
136         ring->desc_buf.va = NULL;
137         ring->desc_buf.pa = 0;
138         ring->desc_buf.size = 0;
139 }
140
141 /**
142  * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
143  * @hw: pointer to the hardware structure
144  * @cq: pointer to the specific Control queue
145  */
146 static int
147 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
148 {
149         int i;
150
151         /* We'll be allocating the buffer info memory first, then we can
152          * allocate the mapped buffers for the event processing
153          */
154         cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
155                                        sizeof(cq->rq.desc_buf), GFP_KERNEL);
156         if (!cq->rq.dma_head)
157                 return -ENOMEM;
158         cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
159
160         /* allocate the mapped buffers */
161         for (i = 0; i < cq->num_rq_entries; i++) {
162                 struct ice_aq_desc *desc;
163                 struct ice_dma_mem *bi;
164
165                 bi = &cq->rq.r.rq_bi[i];
166                 bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
167                                              cq->rq_buf_size, &bi->pa,
168                                              GFP_KERNEL | __GFP_ZERO);
169                 if (!bi->va)
170                         goto unwind_alloc_rq_bufs;
171                 bi->size = cq->rq_buf_size;
172
173                 /* now configure the descriptors for use */
174                 desc = ICE_CTL_Q_DESC(cq->rq, i);
175
176                 desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
177                 if (cq->rq_buf_size > ICE_AQ_LG_BUF)
178                         desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
179                 desc->opcode = 0;
180                 /* This is in accordance with the control queue design; there is
181                  * no register for buffer size configuration
182                  */
183                 desc->datalen = cpu_to_le16(bi->size);
184                 desc->retval = 0;
185                 desc->cookie_high = 0;
186                 desc->cookie_low = 0;
187                 desc->params.generic.addr_high =
188                         cpu_to_le32(upper_32_bits(bi->pa));
189                 desc->params.generic.addr_low =
190                         cpu_to_le32(lower_32_bits(bi->pa));
191                 desc->params.generic.param0 = 0;
192                 desc->params.generic.param1 = 0;
193         }
194         return 0;
195
196 unwind_alloc_rq_bufs:
197         /* don't try to free the one that failed... */
198         i--;
199         for (; i >= 0; i--) {
200                 dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
201                                    cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
202                 cq->rq.r.rq_bi[i].va = NULL;
203                 cq->rq.r.rq_bi[i].pa = 0;
204                 cq->rq.r.rq_bi[i].size = 0;
205         }
206         cq->rq.r.rq_bi = NULL;
207         devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
208         cq->rq.dma_head = NULL;
209
210         return -ENOMEM;
211 }
212
213 /**
214  * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
215  * @hw: pointer to the hardware structure
216  * @cq: pointer to the specific Control queue
217  */
218 static int
219 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
220 {
221         int i;
222
223         /* No mapped memory needed yet, just the buffer info structures */
224         cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
225                                        sizeof(cq->sq.desc_buf), GFP_KERNEL);
226         if (!cq->sq.dma_head)
227                 return -ENOMEM;
228         cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
229
230         /* allocate the mapped buffers */
231         for (i = 0; i < cq->num_sq_entries; i++) {
232                 struct ice_dma_mem *bi;
233
234                 bi = &cq->sq.r.sq_bi[i];
235                 bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
236                                              cq->sq_buf_size, &bi->pa,
237                                              GFP_KERNEL | __GFP_ZERO);
238                 if (!bi->va)
239                         goto unwind_alloc_sq_bufs;
240                 bi->size = cq->sq_buf_size;
241         }
242         return 0;
243
244 unwind_alloc_sq_bufs:
245         /* don't try to free the one that failed... */
246         i--;
247         for (; i >= 0; i--) {
248                 dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
249                                    cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
250                 cq->sq.r.sq_bi[i].va = NULL;
251                 cq->sq.r.sq_bi[i].pa = 0;
252                 cq->sq.r.sq_bi[i].size = 0;
253         }
254         cq->sq.r.sq_bi = NULL;
255         devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
256         cq->sq.dma_head = NULL;
257
258         return -ENOMEM;
259 }
260
261 static int
262 ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
263 {
264         /* Clear Head and Tail */
265         wr32(hw, ring->head, 0);
266         wr32(hw, ring->tail, 0);
267
268         /* set starting point */
269         wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
270         wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
271         wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
272
273         /* Check one register to verify that config was applied */
274         if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
275                 return -EIO;
276
277         return 0;
278 }
279
280 /**
281  * ice_cfg_sq_regs - configure Control ATQ registers
282  * @hw: pointer to the hardware structure
283  * @cq: pointer to the specific Control queue
284  *
285  * Configure base address and length registers for the transmit queue
286  */
287 static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
288 {
289         return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
290 }
291
292 /**
293  * ice_cfg_rq_regs - configure Control ARQ registers
294  * @hw: pointer to the hardware structure
295  * @cq: pointer to the specific Control queue
296  *
297  * Configure base address and length registers for the receive queue (event queue)
298  */
299 static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
300 {
301         int status;
302
303         status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
304         if (status)
305                 return status;
306
307         /* Update tail in the HW to post pre-allocated buffers */
308         wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
309
310         return 0;
311 }
312
313 #define ICE_FREE_CQ_BUFS(hw, qi, ring)                                  \
314 do {                                                                    \
315         /* free descriptors */                                          \
316         if ((qi)->ring.r.ring##_bi) {                                   \
317                 int i;                                                  \
318                                                                         \
319                 for (i = 0; i < (qi)->num_##ring##_entries; i++)        \
320                         if ((qi)->ring.r.ring##_bi[i].pa) {             \
321                                 dmam_free_coherent(ice_hw_to_dev(hw),   \
322                                         (qi)->ring.r.ring##_bi[i].size, \
323                                         (qi)->ring.r.ring##_bi[i].va,   \
324                                         (qi)->ring.r.ring##_bi[i].pa);  \
325                                         (qi)->ring.r.ring##_bi[i].va = NULL;\
326                                         (qi)->ring.r.ring##_bi[i].pa = 0;\
327                                         (qi)->ring.r.ring##_bi[i].size = 0;\
328                 }                                                       \
329         }                                                               \
330         /* free DMA head */                                             \
331         devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);             \
332 } while (0)
333
334 /**
335  * ice_init_sq - main initialization routine for Control ATQ
336  * @hw: pointer to the hardware structure
337  * @cq: pointer to the specific Control queue
338  *
339  * This is the main initialization routine for the Control Send Queue.
340  * Prior to calling this function, the driver *MUST* set the following fields
341  * in the cq structure:
342  *     - cq->num_sq_entries
343  *     - cq->sq_buf_size
344  *
345  * Do *NOT* hold the lock when calling this function; the memory allocation
346  * routines it calls are not safe to use in atomic context.
347  */
348 static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
349 {
350         int ret_code;
351
352         if (cq->sq.count > 0) {
353                 /* queue already initialized */
354                 ret_code = -EBUSY;
355                 goto init_ctrlq_exit;
356         }
357
358         /* verify input for valid configuration */
359         if (!cq->num_sq_entries || !cq->sq_buf_size) {
360                 ret_code = -EIO;
361                 goto init_ctrlq_exit;
362         }
363
364         cq->sq.next_to_use = 0;
365         cq->sq.next_to_clean = 0;
366
367         /* allocate the ring memory */
368         ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
369         if (ret_code)
370                 goto init_ctrlq_exit;
371
372         /* allocate buffers in the rings */
373         ret_code = ice_alloc_sq_bufs(hw, cq);
374         if (ret_code)
375                 goto init_ctrlq_free_rings;
376
377         /* initialize base registers */
378         ret_code = ice_cfg_sq_regs(hw, cq);
379         if (ret_code)
380                 goto init_ctrlq_free_rings;
381
382         /* success! */
383         cq->sq.count = cq->num_sq_entries;
384         goto init_ctrlq_exit;
385
386 init_ctrlq_free_rings:
387         ICE_FREE_CQ_BUFS(hw, cq, sq);
388         ice_free_cq_ring(hw, &cq->sq);
389
390 init_ctrlq_exit:
391         return ret_code;
392 }
393
394 /**
395  * ice_init_rq - initialize receive side of a control queue
396  * @hw: pointer to the hardware structure
397  * @cq: pointer to the specific Control queue
398  *
399  * The main initialization routine for the receive side of a control queue.
400  * Prior to calling this function, the driver *MUST* set the following fields
401  * in the cq structure:
402  *     - cq->num_rq_entries
403  *     - cq->rq_buf_size
404  *
405  * Do *NOT* hold the lock when calling this function; the memory allocation
406  * routines it calls are not safe to use in atomic context.
407  */
408 static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
409 {
410         int ret_code;
411
412         if (cq->rq.count > 0) {
413                 /* queue already initialized */
414                 ret_code = -EBUSY;
415                 goto init_ctrlq_exit;
416         }
417
418         /* verify input for valid configuration */
419         if (!cq->num_rq_entries || !cq->rq_buf_size) {
420                 ret_code = -EIO;
421                 goto init_ctrlq_exit;
422         }
423
424         cq->rq.next_to_use = 0;
425         cq->rq.next_to_clean = 0;
426
427         /* allocate the ring memory */
428         ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
429         if (ret_code)
430                 goto init_ctrlq_exit;
431
432         /* allocate buffers in the rings */
433         ret_code = ice_alloc_rq_bufs(hw, cq);
434         if (ret_code)
435                 goto init_ctrlq_free_rings;
436
437         /* initialize base registers */
438         ret_code = ice_cfg_rq_regs(hw, cq);
439         if (ret_code)
440                 goto init_ctrlq_free_rings;
441
442         /* success! */
443         cq->rq.count = cq->num_rq_entries;
444         goto init_ctrlq_exit;
445
446 init_ctrlq_free_rings:
447         ICE_FREE_CQ_BUFS(hw, cq, rq);
448         ice_free_cq_ring(hw, &cq->rq);
449
450 init_ctrlq_exit:
451         return ret_code;
452 }
453
454 /**
455  * ice_shutdown_sq - shutdown the transmit side of a control queue
456  * @hw: pointer to the hardware structure
457  * @cq: pointer to the specific Control queue
458  *
459  * The main shutdown routine for the Control Transmit Queue
460  */
461 static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
462 {
463         int ret_code = 0;
464
465         mutex_lock(&cq->sq_lock);
466
467         if (!cq->sq.count) {
468                 ret_code = -EBUSY;
469                 goto shutdown_sq_out;
470         }
471
472         /* Stop processing of the control queue */
473         wr32(hw, cq->sq.head, 0);
474         wr32(hw, cq->sq.tail, 0);
475         wr32(hw, cq->sq.len, 0);
476         wr32(hw, cq->sq.bal, 0);
477         wr32(hw, cq->sq.bah, 0);
478
479         cq->sq.count = 0;       /* to indicate uninitialized queue */
480
481         /* free ring buffers and the ring itself */
482         ICE_FREE_CQ_BUFS(hw, cq, sq);
483         ice_free_cq_ring(hw, &cq->sq);
484
485 shutdown_sq_out:
486         mutex_unlock(&cq->sq_lock);
487         return ret_code;
488 }
489
490 /**
491  * ice_aq_ver_check - Check the reported AQ API version
492  * @hw: pointer to the hardware structure
493  *
494  * Checks if the driver should load on a given AQ API version.
495  *
496  * Return: true if the driver should attempt to load, false otherwise.
497  */
498 static bool ice_aq_ver_check(struct ice_hw *hw)
499 {
500         u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
501         u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);
502
503         if (hw->api_maj_ver > exp_fw_api_ver_major) {
504                 /* Major API version is newer than expected, don't load */
505                 dev_warn(ice_hw_to_dev(hw),
506                          "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
507                 return false;
508         } else if (hw->api_maj_ver == exp_fw_api_ver_major) {
509                 if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
510                         dev_info(ice_hw_to_dev(hw),
511                                  "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
512                                  hw->api_maj_ver, hw->api_min_ver,
513                                  exp_fw_api_ver_major, exp_fw_api_ver_minor);
514                 else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
515                         dev_info(ice_hw_to_dev(hw),
516                                  "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
517                                  hw->api_maj_ver, hw->api_min_ver,
518                                  exp_fw_api_ver_major, exp_fw_api_ver_minor);
519         } else {
520                 /* Major API version is older than expected, log a warning */
521                 dev_info(ice_hw_to_dev(hw),
522                          "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
523                          hw->api_maj_ver, hw->api_min_ver,
524                          exp_fw_api_ver_major, exp_fw_api_ver_minor);
525         }
526         return true;
527 }
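/*
 * Editorial example (version numbers hypothetical): with an expected API
 * version of 1.5, a reported 2.x fails the check and the driver refuses to
 * load; 1.8 loads but logs the "newer than expected" message (8 > 5 + 2);
 * 1.2 loads but logs the "older than expected" message (2 + 2 < 5); 1.3
 * through 1.7 load without a message; and any 0.x version loads with the
 * "older than expected" message.
 */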
528
529 /**
530  * ice_shutdown_rq - shutdown Control ARQ
531  * @hw: pointer to the hardware structure
532  * @cq: pointer to the specific Control queue
533  *
534  * The main shutdown routine for the Control Receive Queue
535  */
536 static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
537 {
538         int ret_code = 0;
539
540         mutex_lock(&cq->rq_lock);
541
542         if (!cq->rq.count) {
543                 ret_code = -EBUSY;
544                 goto shutdown_rq_out;
545         }
546
547         /* Stop Control Queue processing */
548         wr32(hw, cq->rq.head, 0);
549         wr32(hw, cq->rq.tail, 0);
550         wr32(hw, cq->rq.len, 0);
551         wr32(hw, cq->rq.bal, 0);
552         wr32(hw, cq->rq.bah, 0);
553
554         /* set rq.count to 0 to indicate uninitialized queue */
555         cq->rq.count = 0;
556
557         /* free ring buffers and the ring itself */
558         ICE_FREE_CQ_BUFS(hw, cq, rq);
559         ice_free_cq_ring(hw, &cq->rq);
560
561 shutdown_rq_out:
562         mutex_unlock(&cq->rq_lock);
563         return ret_code;
564 }
565
566 /**
567  * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
568  * @hw: pointer to the hardware structure
569  */
570 static int ice_init_check_adminq(struct ice_hw *hw)
571 {
572         struct ice_ctl_q_info *cq = &hw->adminq;
573         int status;
574
575         status = ice_aq_get_fw_ver(hw, NULL);
576         if (status)
577                 goto init_ctrlq_free_rq;
578
579         if (!ice_aq_ver_check(hw)) {
580                 status = -EIO;
581                 goto init_ctrlq_free_rq;
582         }
583
584         return 0;
585
586 init_ctrlq_free_rq:
587         ice_shutdown_rq(hw, cq);
588         ice_shutdown_sq(hw, cq);
589         return status;
590 }
591
592 /**
593  * ice_init_ctrlq - main initialization routine for any control Queue
594  * @hw: pointer to the hardware structure
595  * @q_type: specific Control queue type
596  *
597  * Prior to calling this function, the driver *MUST* set the following fields
598  * in the cq structure:
599  *     - cq->num_sq_entries
600  *     - cq->num_rq_entries
601  *     - cq->rq_buf_size
602  *     - cq->sq_buf_size
603  *
604  * NOTE: this function does not initialize the controlq locks
605  */
606 static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
607 {
608         struct ice_ctl_q_info *cq;
609         int ret_code;
610
611         switch (q_type) {
612         case ICE_CTL_Q_ADMIN:
613                 ice_adminq_init_regs(hw);
614                 cq = &hw->adminq;
615                 break;
616         case ICE_CTL_Q_SB:
617                 ice_sb_init_regs(hw);
618                 cq = &hw->sbq;
619                 break;
620         case ICE_CTL_Q_MAILBOX:
621                 ice_mailbox_init_regs(hw);
622                 cq = &hw->mailboxq;
623                 break;
624         default:
625                 return -EINVAL;
626         }
627         cq->qtype = q_type;
628
629         /* verify input for valid configuration */
630         if (!cq->num_rq_entries || !cq->num_sq_entries ||
631             !cq->rq_buf_size || !cq->sq_buf_size) {
632                 return -EIO;
633         }
634
635         /* allocate the ATQ */
636         ret_code = ice_init_sq(hw, cq);
637         if (ret_code)
638                 return ret_code;
639
640         /* allocate the ARQ */
641         ret_code = ice_init_rq(hw, cq);
642         if (ret_code)
643                 goto init_ctrlq_free_sq;
644
645         /* success! */
646         return 0;
647
648 init_ctrlq_free_sq:
649         ice_shutdown_sq(hw, cq);
650         return ret_code;
651 }
652
653 /**
654  * ice_is_sbq_supported - is the sideband queue supported
655  * @hw: pointer to the hardware structure
656  *
657  * Returns true if the sideband control queue interface is
658  * supported for the device, false otherwise
659  */
660 bool ice_is_sbq_supported(struct ice_hw *hw)
661 {
662         /* The device sideband queue is only supported on devices with the
663          * generic MAC type.
664          */
665         return ice_is_generic_mac(hw);
666 }
667
668 /**
669  * ice_get_sbq - returns the right control queue to use for sideband
670  * @hw: pointer to the hardware structure
671  */
672 struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
673 {
674         if (ice_is_sbq_supported(hw))
675                 return &hw->sbq;
676         return &hw->adminq;
677 }
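/*
 * Editorial sketch (hedged, not part of this file): sideband callers do not
 * pick a queue directly; routing through ice_get_sbq() lets the same code
 * work whether or not the device has a dedicated sideband queue, e.g.:
 */
static int example_send_sb_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
			       void *buf, u16 buf_size)
{
	/* falls back to the admin queue when no sideband queue is supported */
	return ice_sq_send_cmd(hw, ice_get_sbq(hw), desc, buf, buf_size, NULL);
}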
678
679 /**
680  * ice_shutdown_ctrlq - shutdown routine for any control queue
681  * @hw: pointer to the hardware structure
682  * @q_type: specific Control queue type
683  * @unloading: is the driver unloading itself
684  *
685  * NOTE: this function does not destroy the control queue locks.
686  */
687 static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
688                                bool unloading)
689 {
690         struct ice_ctl_q_info *cq;
691
692         switch (q_type) {
693         case ICE_CTL_Q_ADMIN:
694                 cq = &hw->adminq;
695                 if (ice_check_sq_alive(hw, cq))
696                         ice_aq_q_shutdown(hw, unloading);
697                 break;
698         case ICE_CTL_Q_SB:
699                 cq = &hw->sbq;
700                 break;
701         case ICE_CTL_Q_MAILBOX:
702                 cq = &hw->mailboxq;
703                 break;
704         default:
705                 return;
706         }
707
708         ice_shutdown_sq(hw, cq);
709         ice_shutdown_rq(hw, cq);
710 }
711
712 /**
713  * ice_shutdown_all_ctrlq - shutdown routine for all control queues
714  * @hw: pointer to the hardware structure
715  * @unloading: is the driver unloading itself
716  *
717  * NOTE: this function does not destroy the control queue locks. The driver
718  * may call this at runtime to shutdown and later restart control queues, such
719  * as in response to a reset event.
720  */
721 void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
722 {
723         /* Shutdown FW admin queue */
724         ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
725         /* Shutdown PHY Sideband */
726         if (ice_is_sbq_supported(hw))
727                 ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
728         /* Shutdown PF-VF Mailbox */
729         ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
730 }
731
732 /**
733  * ice_init_all_ctrlq - main initialization routine for all control queues
734  * @hw: pointer to the hardware structure
735  *
736  * Prior to calling this function, the driver *MUST* set the following fields
737  * in the cq structure for all control queues:
738  *     - cq->num_sq_entries
739  *     - cq->num_rq_entries
740  *     - cq->rq_buf_size
741  *     - cq->sq_buf_size
742  *
743  * NOTE: this function does not initialize the controlq locks.
744  */
745 int ice_init_all_ctrlq(struct ice_hw *hw)
746 {
747         u32 retry = 0;
748         int status;
749
750         /* Init FW admin queue */
751         do {
752                 status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
753                 if (status)
754                         return status;
755
756                 status = ice_init_check_adminq(hw);
757                 if (status != -EIO)
758                         break;
759
760                 ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
761                 ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
762                 msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
763         } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
764
765         if (status)
766                 return status;
767         /* sideband control queue (SBQ) interface is not supported on some
768          * devices. Initialize if supported, else fallback to the admin queue
769          * interface
770          */
771         if (ice_is_sbq_supported(hw)) {
772                 status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
773                 if (status)
774                         return status;
775         }
776         /* Init Mailbox queue */
777         return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
778 }
779
780 /**
781  * ice_init_ctrlq_locks - Initialize locks for a control queue
782  * @cq: pointer to the control queue
783  *
784  * Initializes the send and receive queue locks for a given control queue.
785  */
786 static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
787 {
788         mutex_init(&cq->sq_lock);
789         mutex_init(&cq->rq_lock);
790 }
791
792 /**
793  * ice_create_all_ctrlq - main initialization routine for all control queues
794  * @hw: pointer to the hardware structure
795  *
796  * Prior to calling this function, the driver *MUST* set the following fields
797  * in the cq structure for all control queues:
798  *     - cq->num_sq_entries
799  *     - cq->num_rq_entries
800  *     - cq->rq_buf_size
801  *     - cq->sq_buf_size
802  *
803  * This function creates all the control queue locks and then calls
804  * ice_init_all_ctrlq. It should be called once during driver load. If the
805  * driver needs to re-initialize control queues at run time it should call
806  * ice_init_all_ctrlq instead.
807  */
808 int ice_create_all_ctrlq(struct ice_hw *hw)
809 {
810         ice_init_ctrlq_locks(&hw->adminq);
811         if (ice_is_sbq_supported(hw))
812                 ice_init_ctrlq_locks(&hw->sbq);
813         ice_init_ctrlq_locks(&hw->mailboxq);
814
815         return ice_init_all_ctrlq(hw);
816 }
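/*
 * Editorial sketch (hedged, not part of this file): ice_create_all_ctrlq()
 * expects the caller to have filled in the queue lengths and buffer sizes
 * beforehand, typically during probe. The values below are placeholders;
 * the real driver uses its own per-queue constants.
 */
static void example_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_sq_entries = 512;	/* placeholder sizes */
	hw->adminq.num_rq_entries = 512;
	hw->adminq.sq_buf_size = 4096;
	hw->adminq.rq_buf_size = 4096;
	/* ... likewise for hw->mailboxq and, if supported, hw->sbq ... */
}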
817
818 /**
819  * ice_destroy_ctrlq_locks - Destroy locks for a control queue
820  * @cq: pointer to the control queue
821  *
822  * Destroys the send and receive queue locks for a given control queue.
823  */
824 static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
825 {
826         mutex_destroy(&cq->sq_lock);
827         mutex_destroy(&cq->rq_lock);
828 }
829
830 /**
831  * ice_destroy_all_ctrlq - exit routine for all control queues
832  * @hw: pointer to the hardware structure
833  *
834  * This function shuts down all the control queues and then destroys the
835  * control queue locks. It should be called once during driver unload. The
836  * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
837  * reinitialize control queues, such as in response to a reset event.
838  */
839 void ice_destroy_all_ctrlq(struct ice_hw *hw)
840 {
841         /* shut down all the control queues first */
842         ice_shutdown_all_ctrlq(hw, true);
843
844         ice_destroy_ctrlq_locks(&hw->adminq);
845         if (ice_is_sbq_supported(hw))
846                 ice_destroy_ctrlq_locks(&hw->sbq);
847         ice_destroy_ctrlq_locks(&hw->mailboxq);
848 }
849
850 /**
851  * ice_clean_sq - cleans send side of a control queue
852  * @hw: pointer to the hardware structure
853  * @cq: pointer to the specific Control queue
854  *
855  * Returns the number of free descriptors.
856  */
857 static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
858 {
859         struct ice_ctl_q_ring *sq = &cq->sq;
860         u16 ntc = sq->next_to_clean;
861         struct ice_aq_desc *desc;
862
863         desc = ICE_CTL_Q_DESC(*sq, ntc);
864
865         while (rd32(hw, cq->sq.head) != ntc) {
866                 ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
867                 memset(desc, 0, sizeof(*desc));
868                 ntc++;
869                 if (ntc == sq->count)
870                         ntc = 0;
871                 desc = ICE_CTL_Q_DESC(*sq, ntc);
872         }
873
874         sq->next_to_clean = ntc;
875
876         return ICE_CTL_Q_DESC_UNUSED(sq);
877 }
878
879 /**
880  * ice_ctl_q_str - Convert control queue type to string
881  * @qtype: the control queue type
882  *
883  * Return: A string name for the given control queue type.
884  */
885 static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
886 {
887         switch (qtype) {
888         case ICE_CTL_Q_UNKNOWN:
889                 return "Unknown CQ";
890         case ICE_CTL_Q_ADMIN:
891                 return "AQ";
892         case ICE_CTL_Q_MAILBOX:
893                 return "MBXQ";
894         case ICE_CTL_Q_SB:
895                 return "SBQ";
896         default:
897                 return "Unrecognized CQ";
898         }
899 }
900
901 /**
902  * ice_debug_cq - dump control queue command descriptor for debugging
903  * @hw: pointer to the hardware structure
904  * @cq: pointer to the specific Control queue
905  * @desc: pointer to control queue descriptor
906  * @buf: pointer to command buffer
907  * @buf_len: max length of buf
908  * @response: true if this is the writeback response
909  *
910  * Dumps debug log about control command with descriptor contents.
911  */
912 static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
913                          void *desc, void *buf, u16 buf_len, bool response)
914 {
915         struct ice_aq_desc *cq_desc = desc;
916         u16 datalen, flags;
917
918         if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
919             !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
920                 return;
921
922         if (!desc)
923                 return;
924
925         datalen = le16_to_cpu(cq_desc->datalen);
926         flags = le16_to_cpu(cq_desc->flags);
927
928         ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1)  0x%08X 0x%08X\n\taddr (h,l)   0x%08X 0x%08X\n",
929                   ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
930                   le16_to_cpu(cq_desc->opcode), flags, datalen,
931                   le16_to_cpu(cq_desc->retval),
932                   le32_to_cpu(cq_desc->cookie_high),
933                   le32_to_cpu(cq_desc->cookie_low),
934                   le32_to_cpu(cq_desc->params.generic.param0),
935                   le32_to_cpu(cq_desc->params.generic.param1),
936                   le32_to_cpu(cq_desc->params.generic.addr_high),
937                   le32_to_cpu(cq_desc->params.generic.addr_low));
938         /* Dump buffer iff 1) one exists and 2) is either a response indicated
939          * by the DD and/or CMP flag set or a command with the RD flag set.
940          */
941         if (buf && cq_desc->datalen &&
942             (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) {
943                 char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
944
945                 sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
946                         le32_to_cpu(cq_desc->params.generic.addr_high),
947                         le32_to_cpu(cq_desc->params.generic.addr_low));
948                 ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix,
949                                          buf,
950                                          min_t(u16, buf_len, datalen));
951         }
952 }
953
954 /**
955  * ice_sq_done - poll until the last send on a control queue has completed
956  * @hw: pointer to the HW struct
957  * @cq: pointer to the specific Control queue
958  *
959  * Use read_poll_timeout to poll the control queue head, checking until it
960  * matches next_to_use. According to the control queue designers, this has
961  * better timing reliability than the DD bit.
962  *
963  * Return: true if all the descriptors on the send side of a control queue
964  *         are finished processing, false otherwise.
965  */
966 static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
967 {
968         u32 head;
969
970         /* Wait a short time before the initial check, to allow hardware time
971          * for completion.
972          */
973         udelay(5);
974
975         return !rd32_poll_timeout(hw, cq->sq.head,
976                                   head, head == cq->sq.next_to_use,
977                                   20, ICE_CTL_Q_SQ_CMD_TIMEOUT);
978 }
979
980 /**
981  * ice_sq_send_cmd - send command to a control queue
982  * @hw: pointer to the HW struct
983  * @cq: pointer to the specific Control queue
984  * @desc: prefilled descriptor describing the command
985  * @buf: buffer to use for indirect commands (or NULL for direct commands)
986  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
987  * @cd: pointer to command details structure
988  *
989  * Main command for the transmit side of a control queue. It puts the command
990  * on the queue, bumps the tail, waits for processing of the command, captures
991  * command status and results, etc.
992  */
993 int
994 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
995                 struct ice_aq_desc *desc, void *buf, u16 buf_size,
996                 struct ice_sq_cd *cd)
997 {
998         struct ice_dma_mem *dma_buf = NULL;
999         struct ice_aq_desc *desc_on_ring;
1000         bool cmd_completed = false;
1001         int status = 0;
1002         u16 retval = 0;
1003         u32 val = 0;
1004
1005         /* if reset is in progress return a soft error */
1006         if (hw->reset_ongoing)
1007                 return -EBUSY;
1008         mutex_lock(&cq->sq_lock);
1009
1010         cq->sq_last_status = ICE_AQ_RC_OK;
1011
1012         if (!cq->sq.count) {
1013                 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
1014                 status = -EIO;
1015                 goto sq_send_command_error;
1016         }
1017
1018         if ((buf && !buf_size) || (!buf && buf_size)) {
1019                 status = -EINVAL;
1020                 goto sq_send_command_error;
1021         }
1022
1023         if (buf) {
1024                 if (buf_size > cq->sq_buf_size) {
1025                         ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
1026                                   buf_size);
1027                         status = -EINVAL;
1028                         goto sq_send_command_error;
1029                 }
1030
1031                 desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
1032                 if (buf_size > ICE_AQ_LG_BUF)
1033                         desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1034         }
1035
1036         val = rd32(hw, cq->sq.head);
1037         if (val >= cq->num_sq_entries) {
1038                 ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
1039                           val);
1040                 status = -EIO;
1041                 goto sq_send_command_error;
1042         }
1043
1044         /* Clean the queue and check availability to reclaim any descriptors
1045          * that were processed by FW/MBX; the function returns the number of
1046          * descriptors available. The clean function called here could be
1047          * called in a separate thread in case of asynchronous completions.
1048          */
1049         if (ice_clean_sq(hw, cq) == 0) {
1050                 ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
1051                 status = -ENOSPC;
1052                 goto sq_send_command_error;
1053         }
1054
1055         /* initialize the temp desc pointer with the right desc */
1056         desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
1057
1058         /* if the desc is available copy the temp desc to the right place */
1059         memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));
1060
1061         /* if buf is not NULL assume indirect command */
1062         if (buf) {
1063                 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
1064                 /* copy the user buf into the respective DMA buf */
1065                 memcpy(dma_buf->va, buf, buf_size);
1066                 desc_on_ring->datalen = cpu_to_le16(buf_size);
1067
1068                 /* Update the address values in the desc with the pa value
1069                  * for respective buffer
1070                  */
1071                 desc_on_ring->params.generic.addr_high =
1072                         cpu_to_le32(upper_32_bits(dma_buf->pa));
1073                 desc_on_ring->params.generic.addr_low =
1074                         cpu_to_le32(lower_32_bits(dma_buf->pa));
1075         }
1076
1077         /* Debug desc and buffer */
1078         ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
1079
1080         ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
1081
1082         (cq->sq.next_to_use)++;
1083         if (cq->sq.next_to_use == cq->sq.count)
1084                 cq->sq.next_to_use = 0;
1085         wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1086         ice_flush(hw);
1087
1088         /* Wait for the command to complete. If it finishes within the
1089          * timeout, copy the descriptor back to temp.
1090          */
1091         if (ice_sq_done(hw, cq)) {
1092                 memcpy(desc, desc_on_ring, sizeof(*desc));
1093                 if (buf) {
1094                         /* get returned length to copy */
1095                         u16 copy_size = le16_to_cpu(desc->datalen);
1096
1097                         if (copy_size > buf_size) {
1098                                 ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
1099                                           copy_size, buf_size);
1100                                 status = -EIO;
1101                         } else {
1102                                 memcpy(buf, dma_buf->va, copy_size);
1103                         }
1104                 }
1105                 retval = le16_to_cpu(desc->retval);
1106                 if (retval) {
1107                         ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
1108                                   le16_to_cpu(desc->opcode),
1109                                   retval);
1110
1111                         /* strip off FW internal code */
1112                         retval &= 0xff;
1113                 }
1114                 cmd_completed = true;
1115                 if (!status && retval != ICE_AQ_RC_OK)
1116                         status = -EIO;
1117                 cq->sq_last_status = (enum ice_aq_err)retval;
1118         }
1119
1120         ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
1121
1122         ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
1123
1124         /* save writeback AQ if requested */
1125         if (cd && cd->wb_desc)
1126                 memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc));
1127
1128         /* update the error if time out occurred */
1129         if (!cmd_completed) {
1130                 if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1131                     rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1132                         ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
1133                         status = -EIO;
1134                 } else {
1135                         ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
1136                         status = -EIO;
1137                 }
1138         }
1139
1140 sq_send_command_error:
1141         mutex_unlock(&cq->sq_lock);
1142         return status;
1143 }
1144
1145 /**
1146  * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1147  * @desc: pointer to the temp descriptor (non DMA mem)
1148  * @opcode: the opcode can be used to decide which flags to turn off or on
1149  *
1150  * Fill the desc with default values
1151  */
1152 void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1153 {
1154         /* zero out the desc */
1155         memset(desc, 0, sizeof(*desc));
1156         desc->opcode = cpu_to_le16(opcode);
1157         desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
1158 }
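/*
 * Editorial sketch (hedged, not part of this file): a typical direct
 * (bufferless) command is built with ice_fill_dflt_direct_cmd_desc() and
 * sent on the admin queue. The opcode is a placeholder for a real value
 * from the AQ command set.
 */
static int example_send_direct_cmd(struct ice_hw *hw, u16 opcode)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);

	/* direct command: no indirect buffer, so NULL buffer and zero length */
	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}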
1159
1160 /**
1161  * ice_clean_rq_elem - clean one element from a control queue's receive side
1162  * @hw: pointer to the HW struct
1163  * @cq: pointer to the specific Control queue
1164  * @e: event info from the receive descriptor, includes any buffers
1165  * @pending: number of events that could be left to process
1166  *
1167  * Clean one element from the receive side of a control queue. On return 'e'
1168  * contains contents of the message, and 'pending' contains the number of
1169  * events left to process.
1170  */
1171 int
1172 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1173                   struct ice_rq_event_info *e, u16 *pending)
1174 {
1175         u16 ntc = cq->rq.next_to_clean;
1176         enum ice_aq_err rq_last_status;
1177         struct ice_aq_desc *desc;
1178         struct ice_dma_mem *bi;
1179         int ret_code = 0;
1180         u16 desc_idx;
1181         u16 datalen;
1182         u16 flags;
1183         u16 ntu;
1184
1185         /* pre-clean the event info */
1186         memset(&e->desc, 0, sizeof(e->desc));
1187
1188         /* take the lock before we start messing with the ring */
1189         mutex_lock(&cq->rq_lock);
1190
1191         if (!cq->rq.count) {
1192                 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
1193                 ret_code = -EIO;
1194                 goto clean_rq_elem_err;
1195         }
1196
1197         /* set next_to_use to head */
1198         ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1199
1200         if (ntu == ntc) {
1201                 /* nothing to do - shouldn't need to update ring's values */
1202                 ret_code = -EALREADY;
1203                 goto clean_rq_elem_out;
1204         }
1205
1206         /* now clean the next descriptor */
1207         desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1208         desc_idx = ntc;
1209
1210         rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
1211         flags = le16_to_cpu(desc->flags);
1212         if (flags & ICE_AQ_FLAG_ERR) {
1213                 ret_code = -EIO;
1214                 ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1215                           le16_to_cpu(desc->opcode), rq_last_status);
1216         }
1217         memcpy(&e->desc, desc, sizeof(e->desc));
1218         datalen = le16_to_cpu(desc->datalen);
1219         e->msg_len = min_t(u16, datalen, e->buf_len);
1220         if (e->msg_buf && e->msg_len)
1221                 memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
1222
1223         ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1224
1225         ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
1226
1227         /* Restore the original datalen and buffer address in the desc;
1228          * FW updates datalen to indicate the event message size
1229          */
1230         bi = &cq->rq.r.rq_bi[ntc];
1231         memset(desc, 0, sizeof(*desc));
1232
1233         desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
1234         if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1235                 desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1236         desc->datalen = cpu_to_le16(bi->size);
1237         desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
1238         desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
1239
1240         /* set tail = the last cleaned desc index. */
1241         wr32(hw, cq->rq.tail, ntc);
1242         /* ntc is updated to tail + 1 */
1243         ntc++;
1244         if (ntc == cq->num_rq_entries)
1245                 ntc = 0;
1246         cq->rq.next_to_clean = ntc;
1247         cq->rq.next_to_use = ntu;
1248
1249 clean_rq_elem_out:
1250         /* Set pending if needed, unlock and return */
1251         if (pending) {
1252                 /* re-read HW head to calculate actual pending messages */
1253                 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1254                 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1255         }
1256 clean_rq_elem_err:
1257         mutex_unlock(&cq->rq_lock);
1258
1259         return ret_code;
1260 }
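/*
 * Editorial sketch (hedged, not part of this file): a receive-side service
 * loop typically sizes an event buffer from rq_buf_size and calls
 * ice_clean_rq_elem() until no events remain.
 */
static void example_drain_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_rq_event_info event = {};
	u16 pending = 0;

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		if (ice_clean_rq_elem(hw, cq, &event, &pending))
			break;
		/* process event.desc and event.msg_buf here */
	} while (pending);

	kfree(event.msg_buf);
}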