/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <[email protected]>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-dev.h"
#include "iwl-agn.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detaches iwl_rx_mem_buffers from the pool up to
 *                        the READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from the queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */
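
/*
 * Worked example of the index rules above (illustration only, assuming
 * the usual RX_QUEUE_SIZE of 256): with READ = 10, WRITE = 9 (READ - 1)
 * means the queue is empty, while WRITE = 10 (WRITE == READ) means it
 * is full. After initialization READ = 0 and WRITE = 255, i.e.
 * READ - 1 wrapped.
 */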

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
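
/*
 * Worked example for iwl_rx_queue_space() (illustration only, assuming
 * RX_QUEUE_SIZE = 256): read = 10, write = 200 gives
 * s = 10 - 200 + 256 - 2 = 64 free slots. The "- 2" guard keeps the
 * driver from advancing write all the way to read, which would make a
 * full queue indistinguishable from an empty one.
 */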

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
				   struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}

/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
					     dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
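
/*
 * Because the RBD stores only dma_addr >> 8, the low 8 address bits
 * are dropped, so receive buffers must be 256-byte aligned (see the
 * BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)) check in iwlagn_rx_allocate
 * below). For example, a buffer at DMA address 0x1a2b00 is stored in
 * the RBD as 0x1a2b.
 */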

/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
							      rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->shrd->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}
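
/*
 * Example of the multiple-of-8 rule above (illustration only): with
 * rxq->write = 13, write_actual becomes 13 & ~0x7 = 8, so the device
 * is told about only the first 8 restocked buffers; the rest are
 * announced once write crosses the next multiple of 8.
 */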

/**
 * iwlagn_rx_allocate - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * The callers (iwlagn_rx_replenish and iwlagn_rx_replenish_now) then
 * restock the Rx queue via iwlagn_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization).
 */
static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (hw_params(priv).rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				   hw_params(priv).rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       hw_params(priv).rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. "
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, hw_params(priv).rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
				PAGE_SIZE << hw_params(priv).rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

void iwlagn_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwlagn_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwlagn_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
	iwlagn_rx_allocate(priv, GFP_ATOMIC);

	iwlagn_rx_queue_restock(priv);
}
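
/*
 * The two variants above differ only in allocation context:
 * iwlagn_rx_replenish() runs from process context (the rx_replenish
 * work item below) and may sleep, so it passes GFP_KERNEL;
 * iwlagn_rx_replenish_now() runs from the interrupt tasklet and must
 * not sleep, so it passes GFP_ATOMIC.
 */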

void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, rx_replenish);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwlagn_rx_replenish(priv);
	mutex_unlock(&priv->mutex);
}

/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate how many frames need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		if (WARN_ON(rxb == NULL)) {
			i = (i + 1) & RX_QUEUE_MASK;
			continue;
		}

		rxq->queue[i] = NULL;

		dma_unmap_page(priv->bus->dev, rxb->page_dma,
			       PAGE_SIZE << hw_params(priv).rx_page_order,
			       DMA_FROM_DEVICE);
		pkt = rxb_addr(rxb);

		IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
			i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		iwl_rx_dispatch(priv, rxb);

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
				0, PAGE_SIZE << hw_params(priv).rx_page_order,
				DMA_FROM_DEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(priv);
	else
		iwlagn_rx_queue_restock(priv);
}

/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * both hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);

	inta = priv->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv->shrd) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n",
				inta, inta_mask);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* the interrupt was saved in the inta variable; now we can reset
	 * priv->inta */
	priv->inta = 0;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv->shrd) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* The driver only loads ucode once, when the interface is
		 * set up, but it allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here; the
		 * rfkill handler will take care of restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < hw_params(priv).max_txq_num; i++)
			iwl_txq_update_write_ptr(priv, &priv->txq[i]);

		priv->isr_stats.wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(priv, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(priv, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done
		 * in the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive
		 * the RX interrupt before the shared data reflects the
		 * changes; the periodic interrupt will detect any dangling
		 * Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(priv, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);
		iwl_rx_handle(priv);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(priv, CSR_INT_PERIODIC_REG,
				    CSR_INT_PERIODIC_ENA);

		priv->isr_stats.rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up_interruptible(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv);
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
#define ICT_COUNT	(PAGE_SIZE/sizeof(u32))
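
/*
 * With the common 4 KiB PAGE_SIZE this works out to
 * ICT_COUNT = 4096 / sizeof(u32) = 1024 table entries.
 */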

/* Free dram table */
void iwl_free_isr_ict(struct iwl_priv *priv)
{
	if (priv->ict_tbl_vir) {
		dma_free_coherent(priv->bus->dev,
				  (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
				  priv->ict_tbl_vir,
				  priv->ict_tbl_dma);
		priv->ict_tbl_vir = NULL;
		memset(&priv->ict_tbl_dma, 0,
			sizeof(priv->ict_tbl_dma));
		memset(&priv->aligned_ict_tbl_dma, 0,
			sizeof(priv->aligned_ict_tbl_dma));
	}
}


/* Allocate the DRAM shared table; it must be PAGE_SIZE aligned.
 * Also reset all data related to ICT table interrupts.
 */
int iwl_alloc_isr_ict(struct iwl_priv *priv)
{

	/* allocate shared data table */
	priv->ict_tbl_vir =
		dma_alloc_coherent(priv->bus->dev,
				   (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
				   &priv->ict_tbl_dma, GFP_KERNEL);
	if (!priv->ict_tbl_vir)
		return -ENOMEM;

	/* align table to PAGE_SIZE boundary */
	priv->aligned_ict_tbl_dma =
		ALIGN(priv->ict_tbl_dma, PAGE_SIZE);

	IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
			   (unsigned long long)priv->ict_tbl_dma,
			   (unsigned long long)priv->aligned_ict_tbl_dma,
			   (int)(priv->aligned_ict_tbl_dma -
			   priv->ict_tbl_dma));

	priv->ict_tbl = priv->ict_tbl_vir +
			  (priv->aligned_ict_tbl_dma -
			  priv->ict_tbl_dma);

	IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
			   priv->ict_tbl, priv->ict_tbl_vir,
			   (int)(priv->aligned_ict_tbl_dma -
			   priv->ict_tbl_dma));

	/* reset table and index to all 0 */
	memset(priv->ict_tbl_vir, 0,
		(sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
	priv->ict_index = 0;

	/* add periodic RX interrupt */
	priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
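
/*
 * Worked example of the alignment trick above (made-up address): the
 * code does not assume dma_alloc_coherent() returns a PAGE_SIZE
 * aligned buffer, so it over-allocates by one PAGE_SIZE and rounds
 * the DMA address up. If ict_tbl_dma were 0x12345400 with
 * PAGE_SIZE = 0x1000, aligned_ict_tbl_dma would be 0x12346000 and
 * ict_tbl would point 0xc00 bytes past ict_tbl_vir.
 */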

/* Device is going up; inform it that we are using the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
int iwl_reset_ict(struct iwl_priv *priv)
{
	u32 val;
	unsigned long flags;

	if (!priv->ict_tbl_vir)
		return 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);

	memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);

	val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
			"aligned dma address %Lx\n",
			val,
			(unsigned long long)priv->aligned_ict_tbl_dma);

	iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
	priv->use_ict = true;
	priv->ict_index = 0;
	iwl_write32(priv, CSR_INT, priv->inta_mask);
	iwl_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/* Device is going down; disable ICT interrupt usage */
void iwl_disable_ict(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->use_ict = false;
	spin_unlock_irqrestore(&priv->lock, flags);
}

static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv->shrd) & (IWL_DL_ISR)) {
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	priv->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&priv->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
			!priv->inta)
		iwl_enable_interrupts(priv);

 unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
		iwl_enable_interrupts(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler, the driver
 * stops using the INTA register to learn about device interrupts
 * (reading that register is expensive). Instead, the device writes
 * interrupts into the ICT DRAM table, increments its index, and then
 * fires the interrupt. The driver ORs all ICT table entries from the
 * current index up to the first entry with a 0 value; the result is
 * the interrupt mask to service. The driver then sets the entries back
 * to 0 and updates the index.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 val = 0;
	unsigned long flags;

	if (!priv)
		return IRQ_NONE;

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (!priv->use_ict)
		return iwl_isr(irq, data);

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);


	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!priv->ict_tbl[priv->ict_index]) {
		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/* read all entries that are not 0, starting at ict_index */
	while (priv->ict_tbl[priv->ict_index]) {

		val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
		IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
				priv->ict_index,
				le32_to_cpu(
				    priv->ict_tbl[priv->ict_index]));
		priv->ict_tbl[priv->ict_index] = 0;
		priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
						     ICT_COUNT);

	}

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
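
	/*
	 * Example of the byte spread above (illustration only): the ICT
	 * entry carries INTA bits 0-7 in its low byte and INTA bits 24-31
	 * in its second byte, so val = 0x8001 expands to
	 * inta = 0x80000001.
	 */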
	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
			inta, inta_mask, val);

	inta &= priv->inta_mask;
	priv->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&priv->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
		 !priv->inta) {
		/* Re-enable interrupts here only because they were
		 * disabled by this handler and no tasklet was scheduled;
		 * if a tasklet was scheduled, it will re-enable them.
		 */
		iwl_enable_interrupts(priv);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
		iwl_enable_interrupts(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}