// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE == READ - 1, and is full if
 * WRITE == READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator keeps an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of missing RBDs per allocation request (a request is posted
 *   with 2 empty RBDs; there is no guarantee when the other 6 RBDs are
 *   supplied).  The queues supply the recycling of the remaining RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver's 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
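
/*
 * Illustrative sketch, not part of the driver: the empty/full conditions
 * described above, written out for a power-of-2 queue.  The helper names
 * are hypothetical and exist only to make the invariants concrete; the
 * real space computation lives in iwl_rxq_space() below.
 */
static inline bool iwl_rxq_demo_is_empty(u32 read, u32 write, u32 size)
{
        /* no good data for the driver: WRITE == READ - 1 (mod size) */
        return write == ((read - 1) & (size - 1));
}

static inline bool iwl_rxq_demo_is_full(u32 read, u32 write, u32 size)
{
        /* every slot is owned by the firmware: WRITE == READ */
        return write == read;
}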

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
        /* Make sure rx queue size is a power of 2 */
        WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

        /*
         * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
         * between empty and completely full queues.
         * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
         * defined for negative dividends.
         */
        return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                /* TODO: remove this once fw does it */
                iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
                return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
                                              RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
        } else if (trans->trans_cfg->mq_rx_supported) {
                iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
                return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
                                         RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
        } else {
                iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
                return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                           FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
                                           1000);
        }
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
                                    struct iwl_rxq *rxq)
{
        u32 reg;

        lockdep_assert_held(&rxq->lock);

        /*
         * explicitly wake up the NIC if:
         * 1. shadow registers aren't enabled
         * 2. there is a chance that the NIC is asleep
         */
        if (!trans->trans_cfg->base_params->shadow_reg_enable &&
            test_bit(STATUS_TPOWER_PMI, &trans->status)) {
                reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                       reg);
                        iwl_set_bit(trans, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        rxq->need_update = true;
                        return;
                }
        }

        rxq->write_actual = round_down(rxq->write, 8);
        if (!trans->trans_cfg->mq_rx_supported)
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
        else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
                iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
                            HBUS_TARG_WRPTR_RX_Q(rxq->id));
        else
                iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
                            rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                if (!rxq->need_update)
                        continue;
                spin_lock_bh(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                rxq->need_update = false;
                spin_unlock_bh(&rxq->lock);
        }
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
                                struct iwl_rxq *rxq,
                                struct iwl_rx_mem_buffer *rxb)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                struct iwl_rx_transfer_desc *bd = rxq->bd;

                BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

                bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
                bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
        } else {
                __le64 *bd = rxq->bd;

                bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
        }

        IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
                     (u32)rxb->vid, rxq->id, rxq->write);
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_mem_buffer *rxb;

        /*
         * If the device isn't enabled - no need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
         * pending. We stop the APM before we sync the interrupts because we
         * have to (see comment there). On the other hand, since the APM is
         * stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;

        spin_lock_bh(&rxq->lock);
        while (rxq->free_count) {
                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                rxb->invalid = false;
                /* some low bits are expected to be unset (depending on hw) */
                WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
                /* Point to Rx buffer via next RBD in circular buffer */
                iwl_pcie_restock_bd(trans, rxq, rxb);
                rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
                rxq->free_count--;
        }
        spin_unlock_bh(&rxq->lock);

        /*
         * If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8.
         */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_bh(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock_bh(&rxq->lock);
        }
}
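
/*
 * Illustrative sketch, not part of the driver: the restock paths only tell
 * the device about new buffers in multiples of 8, which is what the
 * rxq->write_actual != (rxq->write & ~0x7) test above implements.  For
 * example, with write == 13 the device still sees 8; slots 8-12 become
 * visible only once write reaches 16.  The helper name is hypothetical.
 */
static inline u32 iwl_rxq_demo_write_actual(u32 write)
{
        return write & ~0x7;    /* same rounding as round_down(write, 8) */
}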

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_rx_mem_buffer *rxb;

        /*
         * If the device isn't enabled - no need to try to add buffers...
         * This can happen when we stop the device and still have an interrupt
         * pending. We stop the APM before we sync the interrupts because we
         * have to (see comment there). On the other hand, since the APM is
         * stopped, we cannot access the HW (in particular not prph).
         * So don't try to restock if the APM has been already stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;

        spin_lock_bh(&rxq->lock);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                __le32 *bd = (__le32 *)rxq->bd;
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                rxb->invalid = false;

                /* Point to Rx buffer via next RBD in circular buffer */
                bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock_bh(&rxq->lock);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_bh(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock_bh(&rxq->lock);
        }
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        if (trans->trans_cfg->mq_rx_supported)
                iwl_pcie_rxmq_restock(trans, rxq);
        else
                iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
                                           u32 *offset, gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
        unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
        struct page *page;
        gfp_t gfp_mask = priority;

        if (trans_pcie->rx_page_order > 0)
                gfp_mask |= __GFP_COMP;

        if (trans_pcie->alloc_page) {
                spin_lock_bh(&trans_pcie->alloc_page_lock);
                /* recheck */
                if (trans_pcie->alloc_page) {
                        *offset = trans_pcie->alloc_page_used;
                        page = trans_pcie->alloc_page;
                        trans_pcie->alloc_page_used += rbsize;
                        if (trans_pcie->alloc_page_used >= allocsize)
                                trans_pcie->alloc_page = NULL;
                        else
                                get_page(page);
                        spin_unlock_bh(&trans_pcie->alloc_page_lock);
                        return page;
                }
                spin_unlock_bh(&trans_pcie->alloc_page_lock);
        }

        /* Alloc a new receive buffer */
        page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
        if (!page) {
                if (net_ratelimit())
                        IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
                                       trans_pcie->rx_page_order);
                /*
                 * Issue an error if we don't have enough pre-allocated
                 * buffers.
                 */
                if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
                        IWL_CRIT(trans,
                                 "Failed to alloc_pages\n");
                return NULL;
        }

        if (2 * rbsize <= allocsize) {
                spin_lock_bh(&trans_pcie->alloc_page_lock);
                if (!trans_pcie->alloc_page) {
                        get_page(page);
                        trans_pcie->alloc_page = page;
                        trans_pcie->alloc_page_used = rbsize;
                }
                spin_unlock_bh(&trans_pcie->alloc_page_lock);
        }

        *offset = 0;
        return page;
}
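
/*
 * Illustrative sketch, not part of the driver: how iwl_pcie_rx_alloc_page()
 * above carves one higher-order page into receive-buffer-sized chunks.
 * Every caller shares the same struct page with a distinct offset, and
 * get_page() keeps the page alive until each user drops its own reference
 * with __free_pages().  The helper name below is hypothetical.
 */
static inline unsigned int iwl_demo_chunks_per_page(unsigned int rbsize,
                                                    unsigned int allocsize)
{
        /* e.g. a 4K RB in an order-1 (8K) allocation yields 2 chunks */
        return allocsize / rbsize;
}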

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                            struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;

        while (1) {
                unsigned int offset;

                spin_lock_bh(&rxq->lock);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_bh(&rxq->lock);
                        return;
                }
                spin_unlock_bh(&rxq->lock);

                page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
                if (!page)
                        return;

                spin_lock_bh(&rxq->lock);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_bh(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock_bh(&rxq->lock);

                BUG_ON(rxb->page);
                rxb->page = page;
                rxb->offset = offset;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, rxb->offset,
                                     trans_pcie->rx_buf_bytes,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
                        spin_lock_bh(&rxq->lock);
                        list_add(&rxb->list, &rxq->rx_used);
                        spin_unlock_bh(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }

                spin_lock_bh(&rxq->lock);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock_bh(&rxq->lock);
        }
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        if (!trans_pcie->rx_pool)
                return;

        for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
                if (!trans_pcie->rx_pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
                               trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
                __free_pages(trans_pcie->rx_pool[i].page,
                             trans_pcie->rx_page_order);
                trans_pcie->rx_pool[i].page = NULL;
        }
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received allocation request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct list_head local_empty;
        int pending = atomic_read(&rba->req_pending);

        IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

        /* If we were scheduled - there is at least one request */
        spin_lock_bh(&rba->lock);
        /* swap out the rba->rbd_empty to a local list */
        list_replace_init(&rba->rbd_empty, &local_empty);
        spin_unlock_bh(&rba->lock);

        while (pending) {
                int i;
                LIST_HEAD(local_allocated);
                gfp_t gfp_mask = GFP_KERNEL;

                /* Do not post a warning if there are only a few requests */
                if (pending < RX_PENDING_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
                        struct iwl_rx_mem_buffer *rxb;
                        struct page *page;

                        /* List should never be empty - each reused RBD is
                         * returned to the list, and initial pool covers any
                         * possible gap between the time the page is allocated
                         * to the time the RBD is added.
                         */
                        BUG_ON(list_empty(&local_empty));
                        /* Get the first rxb from the rbd list */
                        rxb = list_first_entry(&local_empty,
                                               struct iwl_rx_mem_buffer, list);
                        BUG_ON(rxb->page);

                        /* Alloc a new receive buffer */
                        page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
                                                      gfp_mask);
                        if (!page)
                                continue;
                        rxb->page = page;

                        /* Get physical address of the RB */
                        rxb->page_dma = dma_map_page(trans->dev, page,
                                                     rxb->offset,
                                                     trans_pcie->rx_buf_bytes,
                                                     DMA_FROM_DEVICE);
                        if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                                rxb->page = NULL;
                                __free_pages(page, trans_pcie->rx_page_order);
                                continue;
                        }

                        /* move the allocated entry to the out list */
                        list_move(&rxb->list, &local_allocated);
                        i++;
                }

                atomic_dec(&rba->req_pending);
                pending--;

                if (!pending) {
                        pending = atomic_read(&rba->req_pending);
                        if (pending)
                                IWL_DEBUG_TPT(trans,
                                              "Got more pending allocation requests = %d\n",
                                              pending);
                }

                spin_lock_bh(&rba->lock);
                /* add the allocated rbds to the allocator allocated list */
                list_splice_tail(&local_allocated, &rba->rbd_allocated);
                /* get more empty RBDs for current pending requests */
                list_splice_tail_init(&rba->rbd_empty, &local_empty);
                spin_unlock_bh(&rba->lock);

                atomic_inc(&rba->req_ready);
        }

        spin_lock_bh(&rba->lock);
        /* return unused rbds to the allocator empty list */
        list_splice_tail(&local_empty, &rba->rbd_empty);
        spin_unlock_bh(&rba->lock);

        IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
                                      struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i;

        lockdep_assert_held(&rxq->lock);

        /*
         * atomic_dec_if_positive returns req_ready - 1 for any scenario.
         * If req_ready is 0 atomic_dec_if_positive will return -1 and this
         * function will return early, as there are no ready requests.
         * atomic_dec_if_positive will perform the *actual* decrement only if
         * req_ready > 0, i.e. - there are ready requests and the function
         * hands one request to the caller.
         */
        if (atomic_dec_if_positive(&rba->req_ready) < 0)
                return;

        spin_lock(&rba->lock);
        for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
                /* Get next free Rx buffer, remove it from free list */
                struct iwl_rx_mem_buffer *rxb =
                        list_first_entry(&rba->rbd_allocated,
                                         struct iwl_rx_mem_buffer, list);

                list_move(&rxb->list, &rxq->rx_free);
        }
        spin_unlock(&rba->lock);

        rxq->used_count -= RX_CLAIM_REQ_ALLOC;
        rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
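
/*
 * Illustrative sketch, not part of the driver: the atomic_dec_if_positive()
 * semantics that iwl_pcie_rx_allocator_get() relies on.  With req_ready == 0
 * the call returns -1 and leaves the counter untouched; with req_ready > 0
 * it decrements and returns the old value minus 1, so exactly one ready
 * request is consumed per successful call.  The helper name is hypothetical.
 */
static inline bool iwl_demo_claim_ready_request(atomic_t *req_ready)
{
        return atomic_dec_if_positive(req_ready) >= 0;
}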

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
        struct iwl_rb_allocator *rba_p =
                container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
                container_of(rba_p, struct iwl_trans_pcie, rba);

        iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
                return sizeof(struct iwl_rx_transfer_desc);

        return trans->trans_cfg->mq_rx_supported ?
                        sizeof(__le64) : sizeof(__le32);
}

static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
                return sizeof(struct iwl_rx_completion_desc_bz);

        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
                return sizeof(struct iwl_rx_completion_desc);

        return sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        int free_size = iwl_pcie_free_bd_size(trans);

        if (rxq->bd)
                dma_free_coherent(trans->dev,
                                  free_size * rxq->queue_size,
                                  rxq->bd, rxq->bd_dma);
        rxq->bd_dma = 0;
        rxq->bd = NULL;

        rxq->rb_stts_dma = 0;
        rxq->rb_stts = NULL;

        if (rxq->used_bd)
                dma_free_coherent(trans->dev,
                                  iwl_pcie_used_bd_size(trans) *
                                        rxq->queue_size,
                                  rxq->used_bd, rxq->used_bd_dma);
        rxq->used_bd_dma = 0;
        rxq->used_bd = NULL;
}

static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
{
        bool use_rx_td = (trans->trans_cfg->device_family >=
                          IWL_DEVICE_FAMILY_AX210);

        if (use_rx_td)
                return sizeof(__le16);

        return sizeof(struct iwl_rb_status);
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
        struct device *dev = trans->dev;
        int i;
        int free_size;

        spin_lock_init(&rxq->lock);
        if (trans->trans_cfg->mq_rx_supported)
                rxq->queue_size = trans->cfg->num_rbds;
        else
                rxq->queue_size = RX_QUEUE_SIZE;

        free_size = iwl_pcie_free_bd_size(trans);

        /*
         * Allocate the circular buffer of Read Buffer Descriptors
         * (RBDs)
         */
        rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
                                     &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err;

        if (trans->trans_cfg->mq_rx_supported) {
                rxq->used_bd = dma_alloc_coherent(dev,
                                                  iwl_pcie_used_bd_size(trans) *
                                                        rxq->queue_size,
                                                  &rxq->used_bd_dma,
                                                  GFP_KERNEL);
                if (!rxq->used_bd)
                        goto err;
        }

        rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
        rxq->rb_stts_dma =
                trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

        return 0;

err:
        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                iwl_pcie_free_rxq_dma(trans, rxq);
        }

        return -ENOMEM;
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, ret;

        if (WARN_ON(trans_pcie->rxq))
                return -EINVAL;

        trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
                                  GFP_KERNEL);
        trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
                                      sizeof(trans_pcie->rx_pool[0]),
                                      GFP_KERNEL);
        trans_pcie->global_table =
                kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
                        sizeof(trans_pcie->global_table[0]),
                        GFP_KERNEL);
        if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
            !trans_pcie->global_table) {
                ret = -ENOMEM;
                goto err;
        }

        spin_lock_init(&rba->lock);

        /*
         * Allocate the driver's pointer to receive buffer status.
         * Allocate for all queues contiguously (HW requirement).
         */
        trans_pcie->base_rb_stts =
                        dma_alloc_coherent(trans->dev,
                                           rb_stts_size * trans->num_rx_queues,
                                           &trans_pcie->base_rb_stts_dma,
                                           GFP_KERNEL);
        if (!trans_pcie->base_rb_stts) {
                ret = -ENOMEM;
                goto err;
        }

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                rxq->id = i;
                ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
                if (ret)
                        goto err;
        }
        return 0;

err:
        if (trans_pcie->base_rb_stts) {
                dma_free_coherent(trans->dev,
                                  rb_stts_size * trans->num_rx_queues,
                                  trans_pcie->base_rb_stts,
                                  trans_pcie->base_rb_stts_dma);
                trans_pcie->base_rb_stts = NULL;
                trans_pcie->base_rb_stts_dma = 0;
        }
        kfree(trans_pcie->rx_pool);
        trans_pcie->rx_pool = NULL;
        kfree(trans_pcie->global_table);
        trans_pcie->global_table = NULL;
        kfree(trans_pcie->rxq);
        trans_pcie->rxq = NULL;

        return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        switch (trans_pcie->rx_buf_size) {
        case IWL_AMSDU_4K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
                break;
        case IWL_AMSDU_8K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
                break;
        case IWL_AMSDU_12K:
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
                break;
        default:
                WARN_ON(1);
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
        }

        if (!iwl_trans_grab_nic_access(trans))
                return;

        /* Stop Rx DMA */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        /* reset and flush pointers */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

        /* Reset driver's Rx queue write index */
        iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                    (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                    rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                    rb_size |
                    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        iwl_trans_release_nic_access(trans);

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (trans->cfg->host_interrupt_operation_mode)
                iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size, enabled = 0;
        int i;

        switch (trans_pcie->rx_buf_size) {
        case IWL_AMSDU_2K:
                rb_size = RFH_RXF_DMA_RB_SIZE_2K;
                break;
        case IWL_AMSDU_4K:
                rb_size = RFH_RXF_DMA_RB_SIZE_4K;
                break;
        case IWL_AMSDU_8K:
                rb_size = RFH_RXF_DMA_RB_SIZE_8K;
                break;
        case IWL_AMSDU_12K:
                rb_size = RFH_RXF_DMA_RB_SIZE_12K;
                break;
        default:
                WARN_ON(1);
                rb_size = RFH_RXF_DMA_RB_SIZE_4K;
        }

        if (!iwl_trans_grab_nic_access(trans))
                return;

        /* Stop Rx DMA */
        iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
        /* disable free and used rx queue operation */
        iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

        for (i = 0; i < trans->num_rx_queues; i++) {
                /* Tell device where to find RBD free table in DRAM */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_FRBDCB_BA_LSB(i),
                                         trans_pcie->rxq[i].bd_dma);
                /* Tell device where to find RBD used table in DRAM */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_URBDCB_BA_LSB(i),
                                         trans_pcie->rxq[i].used_bd_dma);
                /* Tell device where in DRAM to update its Rx status */
                iwl_write_prph64_no_grab(trans,
                                         RFH_Q_URBD_STTS_WPTR_LSB(i),
                                         trans_pcie->rxq[i].rb_stts_dma);
                /* Reset device index tables */
                iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
                iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
                iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

                enabled |= BIT(i) | BIT(i + 16);
        }

        /*
         * Enable Rx DMA
         * Rx buffer size 4 or 8k or 12k
         * Min RB size 4 or 8
         * Drop frames that exceed RB size
         * 512 RBDs
         */
        iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
                               RFH_DMA_EN_ENABLE_VAL | rb_size |
                               RFH_RXF_DMA_MIN_RB_4_8 |
                               RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
                               RFH_RXF_DMA_RBDCB_SIZE_512);

        /*
         * Activate DMA snooping.
         * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
         * Default queue is 0
         */
        iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
                               RFH_GEN_CFG_RFH_DMA_SNOOP |
                               RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
                               RFH_GEN_CFG_SERVICE_DMA_SNOOP |
                               RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
                                               trans->trans_cfg->integrated ?
                                               RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
                                               RFH_GEN_CFG_RB_CHUNK_SIZE_128));
        /* Enable the relevant rx queues */
        iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

        iwl_trans_release_nic_access(trans);

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
        lockdep_assert_held(&rxq->lock);

        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
        rxq->used_count = 0;
}

static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);

static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)
{
        return *(struct iwl_trans_pcie **)netdev_priv(dev);
}

static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
        struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        int ret;

        trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
        trans = trans_pcie->trans;

        ret = iwl_pcie_rx_handle(trans, rxq->id, budget);

        IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
                      rxq->id, ret, budget);

        if (ret < budget) {
                spin_lock(&trans_pcie->irq_lock);
                if (test_bit(STATUS_INT_ENABLED, &trans->status))
                        _iwl_enable_interrupts(trans);
                spin_unlock(&trans_pcie->irq_lock);

                napi_complete_done(&rxq->napi, ret);
        }

        return ret;
}

static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
{
        struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        int ret;

        trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
        trans = trans_pcie->trans;

        ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
        IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
                      budget);

        if (ret < budget) {
                int irq_line = rxq->id;

                /* FIRST_RSS is shared with line 0 */
                if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
                    rxq->id == 1)
                        irq_line = 0;

                spin_lock(&trans_pcie->irq_lock);
                iwl_pcie_clear_irq(trans, irq_line);
                spin_unlock(&trans_pcie->irq_lock);

                napi_complete_done(&rxq->napi, ret);
        }

        return ret;
}

void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        if (unlikely(!trans_pcie->rxq))
                return;

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                if (rxq && rxq->napi.poll)
                        napi_synchronize(&rxq->napi);
        }
}

static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *def_rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err, queue_size, allocator_pool_size, num_alloc;

        if (!trans_pcie->rxq) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }
        def_rxq = trans_pcie->rxq;

        cancel_work_sync(&rba->rx_alloc);

        spin_lock_bh(&rba->lock);
        atomic_set(&rba->req_pending, 0);
        atomic_set(&rba->req_ready, 0);
        INIT_LIST_HEAD(&rba->rbd_allocated);
        INIT_LIST_HEAD(&rba->rbd_empty);
        spin_unlock_bh(&rba->lock);

        /* free all first - we overwrite everything here */
        iwl_pcie_free_rbs_pool(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                def_rxq->queue[i] = NULL;

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                spin_lock_bh(&rxq->lock);
                /*
                 * Set read write pointer to reflect that we have processed
                 * and used all buffers, but have not restocked the Rx queue
                 * with fresh buffers
                 */
                rxq->read = 0;
                rxq->write = 0;
                rxq->write_actual = 0;
                memset(rxq->rb_stts, 0,
                       (trans->trans_cfg->device_family >=
                        IWL_DEVICE_FAMILY_AX210) ?
                       sizeof(__le16) : sizeof(struct iwl_rb_status));

                iwl_pcie_rx_init_rxb_lists(rxq);

                spin_unlock_bh(&rxq->lock);

                if (!rxq->napi.poll) {
                        int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;

                        if (trans_pcie->msix_enabled)
                                poll = iwl_pcie_napi_poll_msix;

                        netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
                                       poll);
                        napi_enable(&rxq->napi);
                }
        }

        /* move the pool to the default queue and allocator ownerships */
        queue_size = trans->trans_cfg->mq_rx_supported ?
                        trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
        num_alloc = queue_size + allocator_pool_size;

        for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

                if (i < allocator_pool_size)
                        list_add(&rxb->list, &rba->rbd_empty);
                else
                        list_add(&rxb->list, &def_rxq->rx_used);
                trans_pcie->global_table[i] = rxb;
                rxb->vid = (u16)(i + 1);
                rxb->invalid = true;
        }

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

        return 0;
}
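
/*
 * Illustrative worked example, not part of the driver: the pool
 * partitioning done in _iwl_pcie_rx_init() above.  Assuming
 * RX_CLAIM_REQ_ALLOC == 8 and RX_POST_REQ_ALLOC == 2, a device with
 * 4 RX queues and num_rx_bufs == 512 gets
 *
 *      queue_size          = 512 - 1     = 511 RBDs on def_rxq->rx_used
 *      allocator_pool_size = 4 * (8 - 2) =  24 RBDs on rba->rbd_empty
 *      num_alloc           = 511 + 24    = 535 pool entries in use
 *
 * so the allocator can always absorb the 8 - 2 = 6 RBDs per queue that may
 * be in flight between posting a request and claiming its buffers.
 */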

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret = _iwl_pcie_rx_init(trans);

        if (ret)
                return ret;

        if (trans->trans_cfg->mq_rx_supported)
                iwl_pcie_rx_mq_hw_init(trans);
        else
                iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

        iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

        spin_lock_bh(&trans_pcie->rxq->lock);
        iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
        spin_unlock_bh(&trans_pcie->rxq->lock);

        return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

        /*
         * We don't configure the RFH.
         * Restock will be done at alive, after firmware configured the RFH.
         */
        return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i;

        /*
         * if rxq is NULL, it means that nothing has been allocated,
         * exit now
         */
        if (!trans_pcie->rxq) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        cancel_work_sync(&rba->rx_alloc);

        iwl_pcie_free_rbs_pool(trans);

        if (trans_pcie->base_rb_stts) {
                dma_free_coherent(trans->dev,
                                  rb_stts_size * trans->num_rx_queues,
                                  trans_pcie->base_rb_stts,
                                  trans_pcie->base_rb_stts_dma);
                trans_pcie->base_rb_stts = NULL;
                trans_pcie->base_rb_stts_dma = 0;
        }

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                iwl_pcie_free_rxq_dma(trans, rxq);

                if (rxq->napi.poll) {
                        napi_disable(&rxq->napi);
                        netif_napi_del(&rxq->napi);
                }
        }
        kfree(trans_pcie->rx_pool);
        kfree(trans_pcie->global_table);
        kfree(trans_pcie->rxq);

        if (trans_pcie->alloc_page)
                __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}

static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
                                          struct iwl_rb_allocator *rba)
{
        spin_lock(&rba->lock);
        list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
        spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb,
                                  struct iwl_rxq *rxq, bool emergency)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;

        /* Move the RBD to the used list; it will be moved to the allocator
         * in batches before claiming or posting a request */
        list_add_tail(&rxb->list, &rxq->rx_used);

        if (unlikely(emergency))
                return;

        /* Count the allocator owned RBDs */
        rxq->used_count++;

        /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
         * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
         * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers
         * earlier but still need to post another request.
         */
        if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
                /* Move the 2 RBDs to the allocator ownership.
                 * The allocator has another 6 from the pool for the request
                 * completion */
                iwl_pcie_rx_move_to_allocator(rxq, rba);

                atomic_inc(&rba->req_pending);
                queue_work(rba->alloc_wq, &rba->rx_alloc);
        }
}
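
/*
 * Illustrative sketch, not part of the driver: when the modulo test in
 * iwl_pcie_rx_reuse_rbd() fires.  With RX_CLAIM_REQ_ALLOC == 8 and
 * RX_POST_REQ_ALLOC == 2, used_count values of 2, 10, 18, ... satisfy the
 * condition, so a request is posted 2 RBDs into every batch of 8, leaving
 * 6 to arrive while the allocator works.  The helper name is hypothetical.
 */
static inline bool iwl_demo_should_post_request(unsigned int used_count)
{
        return (used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC;
}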
1296
1297 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1298                                 struct iwl_rxq *rxq,
1299                                 struct iwl_rx_mem_buffer *rxb,
1300                                 bool emergency,
1301                                 int i)
1302 {
1303         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1304         struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1305         bool page_stolen = false;
1306         int max_len = trans_pcie->rx_buf_bytes;
1307         u32 offset = 0;
1308
1309         if (WARN_ON(!rxb))
1310                 return;
1311
1312         dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1313
1314         while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1315                 struct iwl_rx_packet *pkt;
1316                 bool reclaim;
1317                 int len;
1318                 struct iwl_rx_cmd_buffer rxcb = {
1319                         ._offset = rxb->offset + offset,
1320                         ._rx_page_order = trans_pcie->rx_page_order,
1321                         ._page = rxb->page,
1322                         ._page_stolen = false,
1323                         .truesize = max_len,
1324                 };
1325
1326                 pkt = rxb_addr(&rxcb);
1327
1328                 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1329                         IWL_DEBUG_RX(trans,
1330                                      "Q %d: RB end marker at offset %d\n",
1331                                      rxq->id, offset);
1332                         break;
1333                 }
1334
1335                 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1336                         FH_RSCSR_RXQ_POS != rxq->id,
1337                      "frame on invalid queue - is on %d and indicates %d\n",
1338                      rxq->id,
1339                      (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1340                         FH_RSCSR_RXQ_POS);
1341
1342                 IWL_DEBUG_RX(trans,
1343                              "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1344                              rxq->id, offset,
1345                              iwl_get_cmd_string(trans,
1346                                                 WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
1347                              pkt->hdr.group_id, pkt->hdr.cmd,
1348                              le16_to_cpu(pkt->hdr.sequence));
1349
1350                 len = iwl_rx_packet_len(pkt);
1351                 len += sizeof(u32); /* account for status word */
1352
1353                 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1354
1355                 /* check that what the device tells us made sense */
1356                 if (len < sizeof(*pkt) || offset > max_len)
1357                         break;
1358
1359                 maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
1360
1361                 /* Reclaim a command buffer only if this packet is a response
1362                  *   to a (driver-originated) command.
1363                  * If the packet (e.g. Rx frame) originated from uCode,
1364                  *   there is no command buffer to reclaim.
1365          * Ucode should set the SEQ_RX_FRAME bit if ucode-originated,
1366          *   but apparently a few packets don't have it set; catch them here. */
1367                 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1368                 if (reclaim && !pkt->hdr.group_id) {
1369                         int i;
1370
1371                         for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1372                                 if (trans_pcie->no_reclaim_cmds[i] ==
1373                                                         pkt->hdr.cmd) {
1374                                         reclaim = false;
1375                                         break;
1376                                 }
1377                         }
1378                 }
1379
1380                 if (rxq->id == IWL_DEFAULT_RX_QUEUE)
1381                         iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1382                                        &rxcb);
1383                 else
1384                         iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1385                                            &rxcb, rxq->id);
1386
1387                 /*
1388                  * After here, we should always check rxcb._page_stolen,
1389                  * if it is true then one of the handlers took the page.
1390                  */
1391
1392                 if (reclaim && txq) {
1393                         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1394                         int index = SEQ_TO_INDEX(sequence);
1395                         int cmd_index = iwl_txq_get_cmd_index(txq, index);
1396
1397                         kfree_sensitive(txq->entries[cmd_index].free_buf);
1398                         txq->entries[cmd_index].free_buf = NULL;
1399
1400                         /* Invoke any callbacks, transfer the buffer to caller,
1401                          * and fire off the (possibly) blocking
1402                          * iwl_trans_send_cmd()
1403                          * as we reclaim the driver command queue */
1404                         if (!rxcb._page_stolen)
1405                                 iwl_pcie_hcmd_complete(trans, &rxcb);
1406                         else
1407                                 IWL_WARN(trans, "Claim null rxb?\n");
1408                 }
1409
1410                 page_stolen |= rxcb._page_stolen;
1411                 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1412                         break;
1413         }
1414
1415         /* page was stolen from us -- free our reference */
1416         if (page_stolen) {
1417                 __free_pages(rxb->page, trans_pcie->rx_page_order);
1418                 rxb->page = NULL;
1419         }
1420
1421         /* Reuse the page if possible. For notification packets and
1422          * SKBs that fail to Rx correctly, add them back into the
1423          * rx_free list for reuse later. */
1424         if (rxb->page != NULL) {
1425                 rxb->page_dma =
1426                         dma_map_page(trans->dev, rxb->page, rxb->offset,
1427                                      trans_pcie->rx_buf_bytes,
1428                                      DMA_FROM_DEVICE);
1429                 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1430                         /*
1431                          * free the page(s) as well to not break
1432                          * the invariant that the items on the used
1433                          * list have no page(s)
1434                          */
1435                         __free_pages(rxb->page, trans_pcie->rx_page_order);
1436                         rxb->page = NULL;
1437                         iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1438                 } else {
1439                         list_add_tail(&rxb->list, &rxq->rx_free);
1440                         rxq->free_count++;
1441                 }
1442         } else
1443                 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1444 }
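/*
 * Illustrative sketch, not driver code: the frame walk above advances
 * through one receive buffer by aligning each packet's length to the next
 * frame boundary. The alignment value of 0x40 stands in for
 * FH_RSCSR_FRAME_ALIGN and should be treated as an assumption here.
 * Guarded by #if 0; standalone it builds as a userspace program.
 */
#if 0
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define FRAME_ALIGN	0x40	/* stand-in for FH_RSCSR_FRAME_ALIGN */

int main(void)
{
	unsigned int offset = 0, max_len = 4096;
	unsigned int lens[] = { 52, 300, 1500 };	/* hypothetical packet lengths */

	for (int i = 0; i < 3 && offset < max_len; i++) {
		printf("packet %d at offset %u, len %u\n", i, offset, lens[i]);
		offset += ALIGN(lens[i], FRAME_ALIGN);	/* next 64-byte boundary */
	}
	return 0;
}
#endif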
1445
1446 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1447                                                   struct iwl_rxq *rxq, int i,
1448                                                   bool *join)
1449 {
1450         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1451         struct iwl_rx_mem_buffer *rxb;
1452         u16 vid;
1453
1454         BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1455         BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
1456
1457         if (!trans->trans_cfg->mq_rx_supported) {
1458                 rxb = rxq->queue[i];
1459                 rxq->queue[i] = NULL;
1460                 return rxb;
1461         }
1462
1463         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1464                 struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
1465
1466                 vid = le16_to_cpu(cd[i].rbid);
1467                 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1468         } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1469                 struct iwl_rx_completion_desc *cd = rxq->used_bd;
1470
1471                 vid = le16_to_cpu(cd[i].rbid);
1472                 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1473         } else {
1474                 __le32 *cd = rxq->used_bd;
1475
1476                 vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
1477         }
1478
1479         if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1480                 goto out_err;
1481
1482         rxb = trans_pcie->global_table[vid - 1];
1483         if (rxb->invalid)
1484                 goto out_err;
1485
1486         IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1487
1488         rxb->invalid = true;
1489
1490         return rxb;
1491
1492 out_err:
1493         WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1494         iwl_force_nmi(trans);
1495         return NULL;
1496 }
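/*
 * Illustrative sketch, not driver code: the 1-based VID lookup above, with
 * the same validity rules (VID 0 and out-of-range VIDs are rejected, and a
 * buffer may only be looked up once until it is requeued). POOL_SIZE is a
 * hypothetical stand-in for RX_POOL_SIZE(num_rx_bufs). Guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE 512	/* hypothetical pool size */

struct buf { bool invalid; };
static struct buf table[POOL_SIZE];

static struct buf *lookup(unsigned int vid)
{
	if (!vid || vid > POOL_SIZE)	/* VID is 1-based; 0 is invalid */
		return NULL;
	if (table[vid - 1].invalid)	/* already handed to the driver */
		return NULL;
	table[vid - 1].invalid = true;	/* valid again only after requeue */
	return &table[vid - 1];
}

int main(void)
{
	/* second lookup of the same VID fails, as does VID 0 */
	printf("%d %d %d\n", lookup(1) != NULL, lookup(1) != NULL,
	       lookup(0) != NULL);	/* prints: 1 0 0 */
	return 0;
}
#endif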
1497
1498 /*
1499  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1500  */
1501 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
1502 {
1503         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1504         struct iwl_rxq *rxq;
1505         u32 r, i, count = 0, handled = 0;
1506         bool emergency = false;
1507
1508         if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1509                 return budget;
1510
1511         rxq = &trans_pcie->rxq[queue];
1512
1513 restart:
1514         spin_lock(&rxq->lock);
1515         /* uCode's read index (stored in shared DRAM) indicates the last Rx
1516          * buffer that the driver may process (last buffer filled by ucode). */
1517         r = iwl_get_closed_rb_stts(trans, rxq);
1518         i = rxq->read;
1519
1520         /* W/A 9000 device step A0 wrap-around bug */
1521         r &= (rxq->queue_size - 1);
1522
1523         /* Rx interrupt, but nothing sent from uCode */
1524         if (i == r)
1525                 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1526
1527         while (i != r && ++handled < budget) {
1528                 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1529                 struct iwl_rx_mem_buffer *rxb;
1530                 /* number of RBDs still waiting for page allocation */
1531                 u32 rb_pending_alloc =
1532                         atomic_read(&trans_pcie->rba.req_pending) *
1533                         RX_CLAIM_REQ_ALLOC;
1534                 bool join = false;
1535
1536                 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1537                              !emergency)) {
1538                         iwl_pcie_rx_move_to_allocator(rxq, rba);
1539                         emergency = true;
1540                         IWL_DEBUG_TPT(trans,
1541                                       "RX path is in emergency. Pending allocations %d\n",
1542                                       rb_pending_alloc);
1543                 }
1544
1545                 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1546
1547                 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1548                 if (!rxb)
1549                         goto out;
1550
1551                 if (unlikely(join || rxq->next_rb_is_fragment)) {
1552                         rxq->next_rb_is_fragment = join;
1553                         /*
1554                          * We can only get a multi-RB in the following cases:
1555                          *  - firmware issue, sending a too big notification
1556                          *  - sniffer mode with a large A-MSDU
1557                          *  - large MTU frames (>2k)
1558                          * since the multi-RB functionality is limited to newer
1559                          * hardware that cannot put multiple entries into a
1560                          * single RB.
1561                          *
1562                          * Right now, the higher layers aren't set up to deal
1563                          * with that, so discard all of these.
1564                          */
1565                         list_add_tail(&rxb->list, &rxq->rx_free);
1566                         rxq->free_count++;
1567                 } else {
1568                         iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1569                 }
1570
1571                 i = (i + 1) & (rxq->queue_size - 1);
1572
1573                 /*
1574                  * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1575                  * try to claim the pre-allocated buffers from the allocator.
1576                  * If not ready - will try to reclaim next time.
1577                  * There is no need to reschedule work - allocator exits only
1578                  * on success
1579                  */
1580                 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1581                         iwl_pcie_rx_allocator_get(trans, rxq);
1582
1583                 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1584                         /* Add the remaining empty RBDs for allocator use */
1585                         iwl_pcie_rx_move_to_allocator(rxq, rba);
1586                 } else if (emergency) {
1587                         count++;
1588                         if (count == 8) {
1589                                 count = 0;
1590                                 if (rb_pending_alloc < rxq->queue_size / 3) {
1591                                         IWL_DEBUG_TPT(trans,
1592                                                       "RX path exited emergency. Pending allocations %d\n",
1593                                                       rb_pending_alloc);
1594                                         emergency = false;
1595                                 }
1596
1597                                 rxq->read = i;
1598                                 spin_unlock(&rxq->lock);
1599                                 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1600                                 iwl_pcie_rxq_restock(trans, rxq);
1601                                 goto restart;
1602                         }
1603                 }
1604         }
1605 out:
1606         /* Backtrack one entry */
1607         rxq->read = i;
1608         spin_unlock(&rxq->lock);
1609
1610         /*
1611          * Handle a case where in emergency there are some unallocated RBDs.
1612          * Those RBDs are in the used list, but are not tracked by the queue's
1613          * used_count, which only counts allocator-owned RBDs.
1614          * Unallocated emergency RBDs must be allocated on exit; otherwise,
1615          * when this function is called again it may not be in emergency mode,
1616          * and they would be handed to the allocator with no tracking in the
1617          * RBD allocator counters, which would lead to them never being
1618          * claimed back by the queue.
1619          * By allocating them here, they are now in the queue free list and
1620          * will be restocked by the next call of iwl_pcie_rxq_restock.
1621          */
1622         if (unlikely(emergency && count))
1623                 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1624
1625         iwl_pcie_rxq_restock(trans, rxq);
1626
1627         return handled;
1628 }
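/*
 * Illustrative sketch, not driver code: the ring-index arithmetic used in
 * the loop above. Masking with (queue_size - 1) implements the wrap-around
 * and relies on queue_size being a power of two. Guarded by #if 0.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int queue_size = 256, i = 254;
	int n;

	for (n = 0; n < 4; n++) {
		printf("%u ", i);
		i = (i + 1) & (queue_size - 1);	/* prints: 254 255 0 1 */
	}
	printf("\n");
	return 0;
}
#endif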
1629
1630 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1631 {
1632         u8 queue = entry->entry;
1633         struct msix_entry *entries = entry - queue;
1634
1635         return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1636 }
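/*
 * Illustrative sketch, not driver code: how the function above recovers the
 * containing structure from a single array element. The element stores its
 * own index, so stepping back by that index yields element 0, and
 * container_of() then yields the enclosing struct. The struct names here
 * are hypothetical. Guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entry { unsigned int entry; };		/* element knows its index */
struct owner { int tag; struct entry entries[4]; };

int main(void)
{
	struct owner o = { .tag = 42, .entries = { {0}, {1}, {2}, {3} } };
	struct entry *e = &o.entries[2];

	/* step back to entries[0], then up to the enclosing struct */
	struct entry *base = e - e->entry;
	struct owner *got = container_of(base, struct owner, entries[0]);

	printf("tag=%d\n", got->tag);	/* prints: tag=42 */
	return 0;
}
#endif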
1637
1638 /*
1639  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1640  * This interrupt handler should be used with RSS queue only.
1641  */
1642 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1643 {
1644         struct msix_entry *entry = dev_id;
1645         struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1646         struct iwl_trans *trans = trans_pcie->trans;
1647         struct iwl_rxq *rxq;
1648
1649         trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1650
1651         if (WARN_ON(entry->entry >= trans->num_rx_queues))
1652                 return IRQ_NONE;
1653
1654         if (!trans_pcie->rxq) {
1655                 if (net_ratelimit())
1656                         IWL_ERR(trans,
1657                                 "[%d] Got MSI-X interrupt before we have Rx queues\n",
1658                                 entry->entry);
1659                 return IRQ_NONE;
1660         }
1661
1662         rxq = &trans_pcie->rxq[entry->entry];
1663         lock_map_acquire(&trans->sync_cmd_lockdep_map);
1664         IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
1665
1666         local_bh_disable();
1667         if (!napi_schedule(&rxq->napi))
1668                 iwl_pcie_clear_irq(trans, entry->entry);
1669         local_bh_enable();
1670
1671         lock_map_release(&trans->sync_cmd_lockdep_map);
1672
1673         return IRQ_HANDLED;
1674 }
1675
1676 /*
1677  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1678  */
1679 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1680 {
1681         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1682         int i;
1683
1684         /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1685         if (trans->cfg->internal_wimax_coex &&
1686             !trans->cfg->apmg_not_supported &&
1687             (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1688                              APMS_CLK_VAL_MRB_FUNC_MODE) ||
1689              (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1690                             APMG_PS_CTRL_VAL_RESET_REQ))) {
1691                 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1692                 iwl_op_mode_wimax_active(trans->op_mode);
1693                 wake_up(&trans->wait_command_queue);
1694                 return;
1695         }
1696
1697         for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1698                 if (!trans_pcie->txqs.txq[i])
1699                         continue;
1700                 del_timer(&trans_pcie->txqs.txq[i]->stuck_timer);
1701         }
1702
1703         /* The STATUS_FW_ERROR bit is set in this function. This must happen
1704          * before we wake up the command caller, to ensure a proper cleanup. */
1705         iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
1706
1707         clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1708         wake_up(&trans->wait_command_queue);
1709 }
1710
1711 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1712 {
1713         u32 inta;
1714
1715         lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1716
1717         trace_iwlwifi_dev_irq(trans->dev);
1718
1719         /* Discover which interrupts are active/pending */
1720         inta = iwl_read32(trans, CSR_INT);
1721
1722         /* the thread will service interrupts and re-enable them */
1723         return inta;
1724 }
1725
1726 /* a device (PCI-E) page is 4096 bytes long */
1727 #define ICT_SHIFT       12
1728 #define ICT_SIZE        (1 << ICT_SHIFT)
1729 #define ICT_COUNT       (ICT_SIZE / sizeof(u32))
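/*
 * The arithmetic above: one 4 KiB device page holds 1024 32-bit ICT
 * entries. A minimal compile-time check of that relationship (C11
 * static_assert), illustrative only and guarded by #if 0:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static_assert((1 << 12) / sizeof(uint32_t) == 1024, "ICT entry count");
#endif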
1730
1731 /* Interrupt handler using the ICT table. With this mechanism the driver
1732  * stops reading the INTA register to discover the device's interrupts,
1733  * since reading that register is expensive. Instead, the device writes
1734  * interrupt causes into the ICT table in DRAM, increments its index, and
1735  * fires an interrupt. The driver ORs all ICT entries from the current
1736  * index up to the first zero entry; the result is the set of interrupts
1737  * to service. The driver then zeroes the consumed entries and updates the index.
1738  */
1739 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1740 {
1741         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1742         u32 inta;
1743         u32 val = 0;
1744         u32 read;
1745
1746         trace_iwlwifi_dev_irq(trans->dev);
1747
1748         /* Ignore interrupt if there's nothing in NIC to service.
1749          * This may be due to IRQ shared with another device,
1750          * or due to sporadic interrupts thrown from our NIC. */
1751         read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1752         trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1753         if (!read)
1754                 return 0;
1755
1756         /*
1757          * Collect all entries up to the first 0, starting from ict_index;
1758          * note we already read at ict_index.
1759          */
1760         do {
1761                 val |= read;
1762                 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1763                                 trans_pcie->ict_index, read);
1764                 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1765                 trans_pcie->ict_index =
1766                         ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1767
1768                 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1769                 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1770                                            read);
1771         } while (read);
1772
1773         /* We should not get this value, just ignore it. */
1774         if (val == 0xffffffff)
1775                 val = 0;
1776
1777         /*
1778          * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
1779          * (bit 15 before shifting it to 31) to clear when using interrupt
1780          * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
1781          * so we use them to decide on the real state of the Rx bit.
1782          * In other words, bit 15 is set if bit 18 or bit 19 is set.
1783          */
1784         if (val & 0xC0000)
1785                 val |= 0x8000;
1786
1787         inta = (0xff & val) | ((0xff00 & val) << 16);
1788         return inta;
1789 }
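/*
 * Illustrative sketch, not driver code: the remapping done above. The ICT
 * entry stores a compressed cause value; its low byte corresponds to CSR
 * bits 0-7 and its high byte to CSR bits 24-31, which is why the high byte
 * is shifted left by a further 16 on top of its existing shift of 8.
 * Guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t ict_to_inta(uint32_t val)
{
	return (val & 0xff) | ((val & 0xff00) << 16);
}

int main(void)
{
	/* bit 15 of the compressed value becomes bit 31 of INTA */
	printf("0x%08x\n", ict_to_inta(0x8042));	/* prints: 0x80000042 */
	return 0;
}
#endif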
1790
1791 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
1792 {
1793         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1794         struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1795         bool hw_rfkill, prev, report;
1796
1797         mutex_lock(&trans_pcie->mutex);
1798         prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1799         hw_rfkill = iwl_is_rfkill_set(trans);
1800         if (hw_rfkill) {
1801                 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1802                 set_bit(STATUS_RFKILL_HW, &trans->status);
1803         }
1804         if (trans_pcie->opmode_down)
1805                 report = hw_rfkill;
1806         else
1807                 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1808
1809         IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1810                  hw_rfkill ? "disable radio" : "enable radio");
1811
1812         isr_stats->rfkill++;
1813
1814         if (prev != report)
1815                 iwl_trans_pcie_rf_kill(trans, report, from_irq);
1816         mutex_unlock(&trans_pcie->mutex);
1817
1818         if (hw_rfkill) {
1819                 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1820                                        &trans->status))
1821                         IWL_DEBUG_RF_KILL(trans,
1822                                           "Rfkill while SYNC HCMD in flight\n");
1823                 wake_up(&trans->wait_command_queue);
1824         } else {
1825                 clear_bit(STATUS_RFKILL_HW, &trans->status);
1826                 if (trans_pcie->opmode_down)
1827                         clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1828         }
1829 }
1830
1831 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1832 {
1833         struct iwl_trans *trans = dev_id;
1834         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1835         struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1836         u32 inta = 0;
1837         u32 handled = 0;
1838         bool polling = false;
1839
1840         lock_map_acquire(&trans->sync_cmd_lockdep_map);
1841
1842         spin_lock_bh(&trans_pcie->irq_lock);
1843
1844         /* If the DRAM interrupt table is not set yet,
1845          * use the legacy interrupt path.
1846          */
1847         if (likely(trans_pcie->use_ict))
1848                 inta = iwl_pcie_int_cause_ict(trans);
1849         else
1850                 inta = iwl_pcie_int_cause_non_ict(trans);
1851
1852         if (iwl_have_debug_level(IWL_DL_ISR)) {
1853                 IWL_DEBUG_ISR(trans,
1854                               "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1855                               inta, trans_pcie->inta_mask,
1856                               iwl_read32(trans, CSR_INT_MASK),
1857                               iwl_read32(trans, CSR_FH_INT_STATUS));
1858                 if (inta & (~trans_pcie->inta_mask))
1859                         IWL_DEBUG_ISR(trans,
1860                                       "We got a masked interrupt (0x%08x)\n",
1861                                       inta & (~trans_pcie->inta_mask));
1862         }
1863
1864         inta &= trans_pcie->inta_mask;
1865
1866         /*
1867          * Ignore interrupt if there's nothing in NIC to service.
1868          * This may be due to IRQ shared with another device,
1869          * or due to sporadic interrupts thrown from our NIC.
1870          */
1871         if (unlikely(!inta)) {
1872                 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1873                 /*
1874                  * Re-enable interrupts here since we don't
1875                  * have anything to service
1876                  */
1877                 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1878                         _iwl_enable_interrupts(trans);
1879                 spin_unlock_bh(&trans_pcie->irq_lock);
1880                 lock_map_release(&trans->sync_cmd_lockdep_map);
1881                 return IRQ_NONE;
1882         }
1883
1884         if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
1885                 /*
1886                  * Hardware disappeared. It might have
1887                  * already raised an interrupt.
1888                  */
1889                 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1890                 spin_unlock_bh(&trans_pcie->irq_lock);
1891                 goto out;
1892         }
1893
1894         /* Ack/clear/reset pending uCode interrupts.
1895          * Note: some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
1896          */
1897         /* There is a hardware bug in the interrupt mask function that some
1898          * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1899          * they are disabled in the CSR_INT_MASK register. Furthermore the
1900          * ICT interrupt handling mechanism has another bug that might cause
1901          * these unmasked interrupts to go undetected. We work around the
1902          * hardware bugs here by ACKing all the possible interrupts so that
1903          * interrupt coalescing can still be achieved.
1904          */
1905         iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1906
1907         if (iwl_have_debug_level(IWL_DL_ISR))
1908                 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1909                               inta, iwl_read32(trans, CSR_INT_MASK));
1910
1911         spin_unlock_bh(&trans_pcie->irq_lock);
1912
1913         /* Now service all interrupt bits discovered above. */
1914         if (inta & CSR_INT_BIT_HW_ERR) {
1915                 IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1916
1917                 /* Tell the device to stop sending interrupts */
1918                 iwl_disable_interrupts(trans);
1919
1920                 isr_stats->hw++;
1921                 iwl_pcie_irq_handle_error(trans);
1922
1923                 handled |= CSR_INT_BIT_HW_ERR;
1924
1925                 goto out;
1926         }
1927
1928         /* NIC fires this, but we don't use it, redundant with WAKEUP */
1929         if (inta & CSR_INT_BIT_SCD) {
1930                 IWL_DEBUG_ISR(trans,
1931                               "Scheduler finished transmitting the frame(s).\n");
1932                 isr_stats->sch++;
1933         }
1934
1935         /* Alive notification via Rx interrupt will do the real work */
1936         if (inta & CSR_INT_BIT_ALIVE) {
1937                 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1938                 isr_stats->alive++;
1939                 if (trans->trans_cfg->gen2) {
1940                         /*
1941                          * We can restock, since firmware configured
1942                          * the RFH
1943                          */
1944                         iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1945                 }
1946
1947                 handled |= CSR_INT_BIT_ALIVE;
1948         }
1949
1950         /* Safely ignore these bits for debug checks below */
1951         inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1952
1953         /* HW RF KILL switch toggled */
1954         if (inta & CSR_INT_BIT_RF_KILL) {
1955                 iwl_pcie_handle_rfkill_irq(trans, true);
1956                 handled |= CSR_INT_BIT_RF_KILL;
1957         }
1958
1959         /* Chip got too hot and stopped itself */
1960         if (inta & CSR_INT_BIT_CT_KILL) {
1961                 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1962                 isr_stats->ctkill++;
1963                 handled |= CSR_INT_BIT_CT_KILL;
1964         }
1965
1966         /* Error detected by uCode */
1967         if (inta & CSR_INT_BIT_SW_ERR) {
1968                 IWL_ERR(trans, "Microcode SW error detected. "
1969                         " Restarting 0x%X.\n", inta);
1970                 isr_stats->sw++;
1971                 iwl_pcie_irq_handle_error(trans);
1972                 handled |= CSR_INT_BIT_SW_ERR;
1973         }
1974
1975         /* uCode wakes up after power-down sleep */
1976         if (inta & CSR_INT_BIT_WAKEUP) {
1977                 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1978                 iwl_pcie_rxq_check_wrptr(trans);
1979                 iwl_pcie_txq_check_wrptrs(trans);
1980
1981                 isr_stats->wakeup++;
1982
1983                 handled |= CSR_INT_BIT_WAKEUP;
1984         }
1985
1986         /* All uCode command responses, including Tx command responses,
1987          * Rx "responses" (frame-received notification), and other
1988          * notifications from uCode come through here. */
1989         if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1990                     CSR_INT_BIT_RX_PERIODIC)) {
1991                 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1992                 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1993                         handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1994                         iwl_write32(trans, CSR_FH_INT_STATUS,
1995                                         CSR_FH_INT_RX_MASK);
1996                 }
1997                 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1998                         handled |= CSR_INT_BIT_RX_PERIODIC;
1999                         iwl_write32(trans,
2000                                 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
2001                 }
2002                 /* Sending an RX interrupt requires many steps to be done
2003                  * in the device:
2004                  * 1- write interrupt to current index in ICT table.
2005                  * 2- dma RX frame.
2006                  * 3- update RX shared data to indicate last write index.
2007                  * 4- send interrupt.
2008                  * This could lead to an RX race: the driver may receive the
2009                  * RX interrupt before the shared data update is visible;
2010                  * the periodic interrupt will detect any dangling Rx activity.
2011                  */
2012
2013                 /* Disable periodic interrupt; we use it as just a one-shot. */
2014                 iwl_write8(trans, CSR_INT_PERIODIC_REG,
2015                             CSR_INT_PERIODIC_DIS);
2016
2017                 /*
2018                  * Enable periodic interrupt in 8 msec only if we received
2019                  * real RX interrupt (instead of just periodic int), to catch
2020                  * any dangling Rx interrupt.  If it was just the periodic
2021                  * interrupt, there was no dangling Rx activity, and no need
2022                  * to extend the periodic interrupt; one-shot is enough.
2023                  */
2024                 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
2025                         iwl_write8(trans, CSR_INT_PERIODIC_REG,
2026                                    CSR_INT_PERIODIC_ENA);
2027
2028                 isr_stats->rx++;
2029
2030                 local_bh_disable();
2031                 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2032                         polling = true;
2033                         __napi_schedule(&trans_pcie->rxq[0].napi);
2034                 }
2035                 local_bh_enable();
2036         }
2037
2038         /* This "Tx" DMA channel is used only for loading uCode */
2039         if (inta & CSR_INT_BIT_FH_TX) {
2040                 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
2041                 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2042                 isr_stats->tx++;
2043                 handled |= CSR_INT_BIT_FH_TX;
2044                 /* Wake up uCode load routine, now that load is complete */
2045                 trans_pcie->ucode_write_complete = true;
2046                 wake_up(&trans_pcie->ucode_write_waitq);
2047                 /* Wake up IMR write routine, now that write to SRAM is complete */
2048                 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2049                         trans_pcie->imr_status = IMR_D2S_COMPLETED;
2050                         wake_up(&trans_pcie->ucode_write_waitq);
2051                 }
2052         }
2053
2054         if (inta & ~handled) {
2055                 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
2056                 isr_stats->unhandled++;
2057         }
2058
2059         if (inta & ~(trans_pcie->inta_mask)) {
2060                 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
2061                          inta & ~trans_pcie->inta_mask);
2062         }
2063
2064         if (!polling) {
2065                 spin_lock_bh(&trans_pcie->irq_lock);
2066                 /* only re-enable all interrupts if disabled by irq */
2067                 if (test_bit(STATUS_INT_ENABLED, &trans->status))
2068                         _iwl_enable_interrupts(trans);
2069                 /* we are loading the firmware, enable FH_TX interrupt only */
2070                 else if (handled & CSR_INT_BIT_FH_TX)
2071                         iwl_enable_fw_load_int(trans);
2072                 /* Re-enable RF_KILL if it occurred */
2073                 else if (handled & CSR_INT_BIT_RF_KILL)
2074                         iwl_enable_rfkill_int(trans);
2075                 /* Re-enable the ALIVE / Rx interrupt if it occurred */
2076                 else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
2077                         iwl_enable_fw_load_int_ctx_info(trans);
2078                 spin_unlock_bh(&trans_pcie->irq_lock);
2079         }
2080
2081 out:
2082         lock_map_release(&trans->sync_cmd_lockdep_map);
2083         return IRQ_HANDLED;
2084 }
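/*
 * Illustrative sketch, not driver code: the cause-accounting pattern used
 * in the handler above. Each serviced cause sets its bit in 'handled';
 * whatever remains in inta & ~handled at the end is reported as unhandled.
 * The cause bits here are hypothetical. Guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define CAUSE_A (1u << 0)	/* hypothetical cause bits */
#define CAUSE_B (1u << 3)

int main(void)
{
	uint32_t inta = CAUSE_A | CAUSE_B, handled = 0;

	if (inta & CAUSE_A)	/* service cause A only */
		handled |= CAUSE_A;

	if (inta & ~handled)
		printf("unhandled bits 0x%08x\n", inta & ~handled); /* 0x8 */
	return 0;
}
#endif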
2085
2086 /******************************************************************************
2087  *
2088  * ICT functions
2089  *
2090  ******************************************************************************/
2091
2092 /* Free dram table */
2093 void iwl_pcie_free_ict(struct iwl_trans *trans)
2094 {
2095         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2096
2097         if (trans_pcie->ict_tbl) {
2098                 dma_free_coherent(trans->dev, ICT_SIZE,
2099                                   trans_pcie->ict_tbl,
2100                                   trans_pcie->ict_tbl_dma);
2101                 trans_pcie->ict_tbl = NULL;
2102                 trans_pcie->ict_tbl_dma = 0;
2103         }
2104 }
2105
2106 /*
2107  * Allocate the shared DRAM table: an aligned memory
2108  * block of ICT_SIZE bytes.
2109  * Also reset all data related to the ICT table interrupt.
2110  */
2111 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2112 {
2113         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2114
2115         trans_pcie->ict_tbl =
2116                 dma_alloc_coherent(trans->dev, ICT_SIZE,
2117                                    &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2118         if (!trans_pcie->ict_tbl)
2119                 return -ENOMEM;
2120
2121         /* just an API sanity check ... it is guaranteed to be aligned */
2122         if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2123                 iwl_pcie_free_ict(trans);
2124                 return -EINVAL;
2125         }
2126
2127         return 0;
2128 }
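/*
 * Illustrative sketch, not driver code: the power-of-two alignment test
 * used in the sanity check above. An address is ICT_SIZE-aligned exactly
 * when its low ICT_SHIFT bits are all zero. Guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define ICT_SIZE (1 << 12)

int main(void)
{
	uint64_t aligned = 0x3000, unaligned = 0x3004;

	printf("%d %d\n", !(aligned & (ICT_SIZE - 1)),
	       !(unaligned & (ICT_SIZE - 1)));	/* prints: 1 0 */
	return 0;
}
#endif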
2129
2130 /* Device is going up: inform it that we are using the ICT interrupt table,
2131  * and switch the driver over to ICT interrupt handling.
2132  */
2133 void iwl_pcie_reset_ict(struct iwl_trans *trans)
2134 {
2135         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2136         u32 val;
2137
2138         if (!trans_pcie->ict_tbl)
2139                 return;
2140
2141         spin_lock_bh(&trans_pcie->irq_lock);
2142         _iwl_disable_interrupts(trans);
2143
2144         memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2145
2146         val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2147
2148         val |= CSR_DRAM_INT_TBL_ENABLE |
2149                CSR_DRAM_INIT_TBL_WRAP_CHECK |
2150                CSR_DRAM_INIT_TBL_WRITE_POINTER;
2151
2152         IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2153
2154         iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2155         trans_pcie->use_ict = true;
2156         trans_pcie->ict_index = 0;
2157         iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2158         _iwl_enable_interrupts(trans);
2159         spin_unlock_bh(&trans_pcie->irq_lock);
2160 }
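/*
 * Illustrative sketch, not driver code: how the register value above is
 * built. The table's physical address is page-aligned, so its low
 * ICT_SHIFT bits are free to carry the enable/wrap-check/write-pointer
 * flags; the flag bit positions below are hypothetical placeholders, not
 * the real CSR definitions. Guarded by #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define ICT_SHIFT	12
#define TBL_ENABLE	(1u << 31)	/* placeholder flag bits */
#define WRAP_CHECK	(1u << 27)
#define WRITE_PTR	(1u << 28)

int main(void)
{
	uint64_t dma = 0x1234000;	/* page-aligned table address */
	uint32_t val = (uint32_t)(dma >> ICT_SHIFT) |
		       TBL_ENABLE | WRAP_CHECK | WRITE_PTR;

	printf("0x%08x\n", val);	/* 0x1234 plus the flag bits */
	return 0;
}
#endif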
2161
2162 /* Device is going down; disable ICT interrupt usage */
2163 void iwl_pcie_disable_ict(struct iwl_trans *trans)
2164 {
2165         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2166
2167         spin_lock_bh(&trans_pcie->irq_lock);
2168         trans_pcie->use_ict = false;
2169         spin_unlock_bh(&trans_pcie->irq_lock);
2170 }
2171
2172 irqreturn_t iwl_pcie_isr(int irq, void *data)
2173 {
2174         struct iwl_trans *trans = data;
2175
2176         if (!trans)
2177                 return IRQ_NONE;
2178
2179         /* Disable (but don't clear!) interrupts here to avoid
2180          * back-to-back ISRs and sporadic interrupts from our NIC.
2181          * If we have something to service, the tasklet will re-enable ints.
2182          * If we *don't* have something, we'll re-enable before leaving here.
2183          */
2184         iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2185
2186         return IRQ_WAKE_THREAD;
2187 }
2188
2189 irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2190 {
2191         return IRQ_WAKE_THREAD;
2192 }
2193
2194 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2195 {
2196         struct msix_entry *entry = dev_id;
2197         struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2198         struct iwl_trans *trans = trans_pcie->trans;
2199         struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2200         u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
2201         u32 inta_fh, inta_hw;
2202         bool polling = false;
2203         bool sw_err;
2204
2205         if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2206                 inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
2207
2208         if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2209                 inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
2210
2211         lock_map_acquire(&trans->sync_cmd_lockdep_map);
2212
2213         spin_lock_bh(&trans_pcie->irq_lock);
2214         inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2215         inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2216         /*
2217          * Clear the cause registers to avoid handling the same cause twice.
2218          */
2219         iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
2220         iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2221         spin_unlock_bh(&trans_pcie->irq_lock);
2222
2223         trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2224
2225         if (unlikely(!(inta_fh | inta_hw))) {
2226                 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2227                 lock_map_release(&trans->sync_cmd_lockdep_map);
2228                 return IRQ_NONE;
2229         }
2230
2231         if (iwl_have_debug_level(IWL_DL_ISR)) {
2232                 IWL_DEBUG_ISR(trans,
2233                               "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2234                               entry->entry, inta_fh, trans_pcie->fh_mask,
2235                               iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2236                 if (inta_fh & ~trans_pcie->fh_mask)
2237                         IWL_DEBUG_ISR(trans,
2238                                       "We got a masked interrupt (0x%08x)\n",
2239                                       inta_fh & ~trans_pcie->fh_mask);
2240         }
2241
2242         inta_fh &= trans_pcie->fh_mask;
2243
2244         if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2245             inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2246                 local_bh_disable();
2247                 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2248                         polling = true;
2249                         __napi_schedule(&trans_pcie->rxq[0].napi);
2250                 }
2251                 local_bh_enable();
2252         }
2253
2254         if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2255             inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2256                 local_bh_disable();
2257                 if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2258                         polling = true;
2259                         __napi_schedule(&trans_pcie->rxq[1].napi);
2260                 }
2261                 local_bh_enable();
2262         }
2263
2264         /* This "Tx" DMA channel is used only for loading uCode */
2265         if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
2266             trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2267                 IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
2268                 isr_stats->tx++;
2269
2270                 /* Wake up IMR routine once write to SRAM is complete */
2271                 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2272                         trans_pcie->imr_status = IMR_D2S_COMPLETED;
2273                         wake_up(&trans_pcie->ucode_write_waitq);
2274                 }
2275         } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2276                 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2277                 isr_stats->tx++;
2278                 /*
2279                  * Wake up uCode load routine,
2280                  * now that load is complete
2281                  */
2282                 trans_pcie->ucode_write_complete = true;
2283                 wake_up(&trans_pcie->ucode_write_waitq);
2284
2285                 /* Wake up IMR routine once write to SRAM is complete */
2286                 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2287                         trans_pcie->imr_status = IMR_D2S_COMPLETED;
2288                         wake_up(&trans_pcie->ucode_write_waitq);
2289                 }
2290         }
2291
2292         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2293                 sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
2294         else
2295                 sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
2296
2297         if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
2298                 IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
2299                         inta_hw);
2300                 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2301                         iwl_trans_pcie_reset(trans,
2302                                              IWL_RESET_MODE_PROD_RESET);
2303         }
2304
2305         /* Error detected by uCode */
2306         if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
2307                 IWL_ERR(trans,
2308                         "Microcode SW error detected. Restarting 0x%X.\n",
2309                         inta_fh);
2310                 isr_stats->sw++;
2311                 /* during FW reset flow report errors from there */
2312                 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2313                         trans_pcie->imr_status = IMR_D2S_ERROR;
2314                         wake_up(&trans_pcie->imr_waitq);
2315                 } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2316                         trans_pcie->fw_reset_state = FW_RESET_ERROR;
2317                         wake_up(&trans_pcie->fw_reset_waitq);
2318                 } else {
2319                         iwl_pcie_irq_handle_error(trans);
2320                 }
2321         }
2322
2323         /* After checking FH register check HW register */
2324         if (iwl_have_debug_level(IWL_DL_ISR)) {
2325                 IWL_DEBUG_ISR(trans,
2326                               "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2327                               entry->entry, inta_hw, trans_pcie->hw_mask,
2328                               iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2329                 if (inta_hw & ~trans_pcie->hw_mask)
2330                         IWL_DEBUG_ISR(trans,
2331                                       "We got a masked interrupt 0x%08x\n",
2332                                       inta_hw & ~trans_pcie->hw_mask);
2333         }
2334
2335         inta_hw &= trans_pcie->hw_mask;
2336
2337         /* Alive notification via Rx interrupt will do the real work */
2338         if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2339                 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2340                 isr_stats->alive++;
2341                 if (trans->trans_cfg->gen2) {
2342                         /* We can restock, since firmware configured the RFH */
2343                         iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2344                 }
2345         }
2346
2347         /*
2348          * In some rare cases when the HW is in a bad state, we may
2349          * get this interrupt too early, when prph_info is still NULL.
2350          * So make sure that it's not NULL to prevent crashing.
2351          */
2352         if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
2353                 u32 sleep_notif =
2354                         le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2355                 if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
2356                     sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
2357                         IWL_DEBUG_ISR(trans,
2358                                       "Sx interrupt: sleep notification = 0x%x\n",
2359                                       sleep_notif);
2360                         trans_pcie->sx_complete = true;
2361                         wake_up(&trans_pcie->sx_waitq);
2362                 } else {
2363                         /* uCode wakes up after power-down sleep */
2364                         IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2365                         iwl_pcie_rxq_check_wrptr(trans);
2366                         iwl_pcie_txq_check_wrptrs(trans);
2367
2368                         isr_stats->wakeup++;
2369                 }
2370         }
2371
2372         /* Chip got too hot and stopped itself */
2373         if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2374                 IWL_ERR(trans, "Microcode CT kill error detected.\n");
2375                 isr_stats->ctkill++;
2376         }
2377
2378         /* HW RF KILL switch toggled */
2379         if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2380                 iwl_pcie_handle_rfkill_irq(trans, true);
2381
2382         if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2383                 IWL_ERR(trans,
2384                         "Hardware error detected. Restarting.\n");
2385
2386                 isr_stats->hw++;
2387                 trans->dbg.hw_error = true;
2388                 iwl_pcie_irq_handle_error(trans);
2389         }
2390
2391         if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
2392                 IWL_DEBUG_ISR(trans, "Reset flow completed\n");
2393                 trans_pcie->fw_reset_state = FW_RESET_OK;
2394                 wake_up(&trans_pcie->fw_reset_waitq);
2395         }
2396
2397         if (!polling)
2398                 iwl_pcie_clear_irq(trans, entry->entry);
2399
2400         lock_map_release(&trans->sync_cmd_lockdep_map);
2401
2402         return IRQ_HANDLED;
2403 }