1 | /****************************************************************************** | |
2 | * | |
3 | * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved. | |
4 | * | |
5 | * Portions of this file are derived from the ipw3945 project, as well | |
6 | * as portions of the ieee80211 subsystem header files. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of version 2 of the GNU General Public License as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution in the | |
22 | * file called LICENSE. | |
23 | * | |
24 | * Contact Information: | |
25 | * Intel Linux Wireless <[email protected]> | |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
27 | * | |
28 | *****************************************************************************/ | |
29 | ||
30 | #include <linux/kernel.h> | |
31 | #include <linux/module.h> | |
32 | #include <linux/init.h> | |
33 | #include <linux/pci.h> | |
34 | #include <linux/dma-mapping.h> | |
35 | #include <linux/delay.h> | |
36 | #include <linux/skbuff.h> | |
37 | #include <linux/netdevice.h> | |
38 | #include <linux/wireless.h> | |
39 | #include <linux/firmware.h> | |
40 | #include <linux/etherdevice.h> | |
41 | #include <linux/if_arp.h> | |
42 | ||
43 | #include <net/mac80211.h> | |
44 | ||
45 | #include <asm/div64.h> | |
46 | ||
47 | #define DRV_NAME "iwlagn" | |
48 | ||
49 | #include "iwl-eeprom.h" | |
50 | #include "iwl-dev.h" | |
51 | #include "iwl-core.h" | |
52 | #include "iwl-io.h" | |
53 | #include "iwl-helpers.h" | |
54 | #include "iwl-sta.h" | |
55 | #include "iwl-calib.h" | |
56 | ||
57 | ||
58 | /****************************************************************************** | |
59 | * | |
60 | * module boilerplate | |
61 | * | |
62 | ******************************************************************************/ | |
63 | ||
64 | /* | |
65 | * module name, copyright, version, etc. | |
66 | */ | |
67 | #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux" | |
68 | ||
69 | #ifdef CONFIG_IWLWIFI_DEBUG | |
70 | #define VD "d" | |
71 | #else | |
72 | #define VD | |
73 | #endif | |
74 | ||
75 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | |
76 | #define VS "s" | |
77 | #else | |
78 | #define VS | |
79 | #endif | |
80 | ||
81 | #define DRV_VERSION IWLWIFI_VERSION VD VS | |
82 | ||
83 | ||
84 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | |
85 | MODULE_VERSION(DRV_VERSION); | |
86 | MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); | |
87 | MODULE_LICENSE("GPL"); | |
88 | MODULE_ALIAS("iwl4965"); | |
89 | ||
90 | /*************** STATION TABLE MANAGEMENT **** | |
91 | * mac80211 should be examined to determine if sta_info is duplicating | |
92 | * the functionality provided here | |
93 | */ | |
94 | ||
95 | /**************************************************************/ | |
96 | ||
97 | /** | |
98 | * iwl_commit_rxon - commit staging_rxon to hardware | |
99 | * | |
100 | * The RXON command in staging_rxon is committed to the hardware and | |
101 | * the active_rxon structure is updated with the new data. This | |
102 | * function correctly transitions out of the RXON_ASSOC_MSK state if | |
103 | * a HW tune is required based on the RXON structure changes. | |
104 | */ | |
105 | int iwl_commit_rxon(struct iwl_priv *priv) | |
106 | { | |
107 | /* cast away the const for active_rxon in this function */ | |
108 | struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; | |
109 | int ret; | |
110 | bool new_assoc = | |
111 | !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK); | |
112 | ||
113 | if (!iwl_is_alive(priv)) | |
114 | return -EBUSY; | |
115 | ||
116 | /* always get timestamp with Rx frame */ | |
117 | priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; | |
118 | /* Allow CTS-to-self if possible. This is relevant only for the | |
119 | * 5000 series, but will not damage 4965 */ | |
120 | priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; | |
121 | ||
122 | ret = iwl_check_rxon_cmd(priv); | |
123 | if (ret) { | |
124 | IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); | |
125 | return -EINVAL; | |
126 | } | |
127 | ||
128 | /* If we don't need to send a full RXON, we can use | |
129 | * iwl_rxon_assoc_cmd which is used to reconfigure filter | |
130 | * and other flags for the current radio configuration. */ | |
131 | if (!iwl_full_rxon_required(priv)) { | |
132 | ret = iwl_send_rxon_assoc(priv); | |
133 | if (ret) { | |
134 | IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); | |
135 | return ret; | |
136 | } | |
137 | ||
138 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | |
139 | return 0; | |
140 | } | |
141 | ||
142 | /* station table will be cleared */ | |
143 | priv->assoc_station_added = 0; | |
144 | ||
145 | /* If we are currently associated and the new config requires | |
146 | * an RXON_ASSOC and wants the association mask enabled, we must | |
147 | * clear the association bit from the active configuration | |
148 | * before we apply the new config */ | |
149 | if (iwl_is_associated(priv) && new_assoc) { | |
150 | IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); | |
151 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | |
152 | ||
153 | ret = iwl_send_cmd_pdu(priv, REPLY_RXON, | |
154 | sizeof(struct iwl_rxon_cmd), | |
155 | &priv->active_rxon); | |
156 | ||
157 | /* If the mask clearing failed then we set | |
158 | * active_rxon back to what it was previously */ | |
159 | if (ret) { | |
160 | active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; | |
161 | IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); | |
162 | return ret; | |
163 | } | |
164 | } | |
165 | ||
166 | IWL_DEBUG_INFO(priv, "Sending RXON\n" | |
167 | "* with%s RXON_FILTER_ASSOC_MSK\n" | |
168 | "* channel = %d\n" | |
169 | "* bssid = %pM\n", | |
170 | (new_assoc ? "" : "out"), | |
171 | le16_to_cpu(priv->staging_rxon.channel), | |
172 | priv->staging_rxon.bssid_addr); | |
173 | ||
174 | iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); | |
175 | ||
176 | /* Apply the new configuration. | |
177 | * RXON unassoc clears the station table in uCode, so send it before | |
178 | * we add the bcast station. If the assoc bit is set, we will send RXON | |
179 | * after having added the bcast and bssid stations. | |
180 | */ | |
181 | if (!new_assoc) { | |
182 | ret = iwl_send_cmd_pdu(priv, REPLY_RXON, | |
183 | sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); | |
184 | if (ret) { | |
185 | IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); | |
186 | return ret; | |
187 | } | |
188 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | |
189 | } | |
190 | ||
191 | iwl_clear_stations_table(priv); | |
192 | ||
193 | priv->start_calib = 0; | |
194 | ||
195 | /* Add the broadcast address so we can send broadcast frames */ | |
196 | if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) == | |
197 | IWL_INVALID_STATION) { | |
198 | IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n"); | |
199 | return -EIO; | |
200 | } | |
201 | ||
202 | /* If we have set the ASSOC_MSK and we are in BSS mode then | |
203 | * add the IWL_AP_ID to the station rate table */ | |
204 | if (new_assoc) { | |
205 | if (priv->iw_mode == NL80211_IFTYPE_STATION) { | |
206 | ret = iwl_rxon_add_station(priv, | |
207 | priv->active_rxon.bssid_addr, 1); | |
208 | if (ret == IWL_INVALID_STATION) { | |
209 | IWL_ERR(priv, | |
210 | "Error adding AP address for TX.\n"); | |
211 | return -EIO; | |
212 | } | |
213 | priv->assoc_station_added = 1; | |
214 | if (priv->default_wep_key && | |
215 | iwl_send_static_wepkey_cmd(priv, 0)) | |
216 | IWL_ERR(priv, | |
217 | "Could not send WEP static key.\n"); | |
218 | } | |
219 | ||
220 | /* Apply the new configuration. | |
221 | * RXON assoc doesn't clear the station table in uCode. | |
222 | */ | |
223 | ret = iwl_send_cmd_pdu(priv, REPLY_RXON, | |
224 | sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); | |
225 | if (ret) { | |
226 | IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); | |
227 | return ret; | |
228 | } | |
229 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | |
230 | } | |
231 | ||
232 | iwl_init_sensitivity(priv); | |
233 | ||
234 | /* If we issue a new RXON command which requires a tune, then we must | |
235 | * send a new TXPOWER command or we won't be able to Tx any frames */ | |
236 | ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); | |
237 | if (ret) { | |
238 | IWL_ERR(priv, "Error sending TX power (%d)\n", ret); | |
239 | return ret; | |
240 | } | |
241 | ||
242 | return 0; | |
243 | } | |
244 | ||
245 | void iwl_update_chain_flags(struct iwl_priv *priv) | |
246 | { | |
247 | ||
248 | if (priv->cfg->ops->hcmd->set_rxon_chain) | |
249 | priv->cfg->ops->hcmd->set_rxon_chain(priv); | |
250 | iwlcore_commit_rxon(priv); | |
251 | } | |
252 | ||
253 | static void iwl_clear_free_frames(struct iwl_priv *priv) | |
254 | { | |
255 | struct list_head *element; | |
256 | ||
257 | IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", | |
258 | priv->frames_count); | |
259 | ||
260 | while (!list_empty(&priv->free_frames)) { | |
261 | element = priv->free_frames.next; | |
262 | list_del(element); | |
263 | kfree(list_entry(element, struct iwl_frame, list)); | |
264 | priv->frames_count--; | |
265 | } | |
266 | ||
267 | if (priv->frames_count) { | |
268 | IWL_WARN(priv, "%d frames still in use. Did we lose one?\n", | |
269 | priv->frames_count); | |
270 | priv->frames_count = 0; | |
271 | } | |
272 | } | |
273 | ||
274 | static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) | |
275 | { | |
276 | struct iwl_frame *frame; | |
277 | struct list_head *element; | |
278 | if (list_empty(&priv->free_frames)) { | |
279 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); | |
280 | if (!frame) { | |
281 | IWL_ERR(priv, "Could not allocate frame!\n"); | |
282 | return NULL; | |
283 | } | |
284 | ||
285 | priv->frames_count++; | |
286 | return frame; | |
287 | } | |
288 | ||
289 | element = priv->free_frames.next; | |
290 | list_del(element); | |
291 | return list_entry(element, struct iwl_frame, list); | |
292 | } | |
293 | ||
294 | static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) | |
295 | { | |
296 | memset(frame, 0, sizeof(*frame)); | |
297 | list_add(&frame->list, &priv->free_frames); | |
298 | } | |
299 | ||
300 | static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, | |
301 | struct ieee80211_hdr *hdr, | |
302 | int left) | |
303 | { | |
304 | if (!iwl_is_associated(priv) || !priv->ibss_beacon || | |
305 | ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && | |
306 | (priv->iw_mode != NL80211_IFTYPE_AP))) | |
307 | return 0; | |
308 | ||
309 | if (priv->ibss_beacon->len > left) | |
310 | return 0; | |
311 | ||
312 | memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); | |
313 | ||
314 | return priv->ibss_beacon->len; | |
315 | } | |
316 | ||
317 | static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, | |
318 | struct iwl_frame *frame, u8 rate) | |
319 | { | |
320 | struct iwl_tx_beacon_cmd *tx_beacon_cmd; | |
321 | unsigned int frame_size; | |
322 | ||
323 | tx_beacon_cmd = &frame->u.beacon; | |
324 | memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); | |
325 | ||
326 | tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; | |
327 | tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | |
328 | ||
329 | frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame, | |
330 | sizeof(frame->u) - sizeof(*tx_beacon_cmd)); | |
331 | ||
332 | BUG_ON(frame_size > MAX_MPDU_SIZE); | |
333 | tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); | |
334 | ||
335 | if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP)) | |
336 | tx_beacon_cmd->tx.rate_n_flags = | |
337 | iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK); | |
338 | else | |
339 | tx_beacon_cmd->tx.rate_n_flags = | |
340 | iwl_hw_set_rate_n_flags(rate, 0); | |
341 | ||
342 | tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | | |
343 | TX_CMD_FLG_TSF_MSK | | |
344 | TX_CMD_FLG_STA_RATE_MSK; | |
345 | ||
346 | return sizeof(*tx_beacon_cmd) + frame_size; | |
347 | } | |
348 | static int iwl_send_beacon_cmd(struct iwl_priv *priv) | |
349 | { | |
350 | struct iwl_frame *frame; | |
351 | unsigned int frame_size; | |
352 | int rc; | |
353 | u8 rate; | |
354 | ||
355 | frame = iwl_get_free_frame(priv); | |
356 | ||
357 | if (!frame) { | |
358 | IWL_ERR(priv, "Could not obtain free frame buffer for beacon " | |
359 | "command.\n"); | |
360 | return -ENOMEM; | |
361 | } | |
362 | ||
363 | rate = iwl_rate_get_lowest_plcp(priv); | |
364 | ||
365 | frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); | |
366 | ||
367 | rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, | |
368 | &frame->u.cmd[0]); | |
369 | ||
370 | iwl_free_frame(priv, frame); | |
371 | ||
372 | return rc; | |
373 | } | |
374 | ||
375 | static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) | |
376 | { | |
377 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | |
378 | ||
379 | dma_addr_t addr = get_unaligned_le32(&tb->lo); | |
380 | if (sizeof(dma_addr_t) > sizeof(u32)) | |
381 | addr |= | |
382 | ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; | |
383 | ||
384 | return addr; | |
385 | } | |
386 | ||
387 | static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) | |
388 | { | |
389 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | |
390 | ||
391 | return le16_to_cpu(tb->hi_n_len) >> 4; | |
392 | } | |
393 | ||
394 | static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, | |
395 | dma_addr_t addr, u16 len) | |
396 | { | |
397 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | |
398 | u16 hi_n_len = len << 4; | |
399 | ||
400 | put_unaligned_le32(addr, &tb->lo); | |
401 | if (sizeof(dma_addr_t) > sizeof(u32)) | |
402 | hi_n_len |= ((addr >> 16) >> 16) & 0xF; | |
403 | ||
404 | tb->hi_n_len = cpu_to_le16(hi_n_len); | |
405 | ||
406 | tfd->num_tbs = idx + 1; | |
407 | } | |
408 | ||
409 | static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) | |
410 | { | |
411 | return tfd->num_tbs & 0x1f; | |
412 | } | |
413 | ||
414 | /** | |
415 | * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] | |
416 | * @priv: driver private data | |
417 | * @txq: tx queue | |
418 | * | |
419 | * Does NOT advance any TFD circular buffer read/write indexes | |
420 | * Does NOT free the TFD itself (which is within circular buffer) | |
421 | */ | |
422 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) | |
423 | { | |
424 | struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; | |
425 | struct iwl_tfd *tfd; | |
426 | struct pci_dev *dev = priv->pci_dev; | |
427 | int index = txq->q.read_ptr; | |
428 | int i; | |
429 | int num_tbs; | |
430 | ||
431 | tfd = &tfd_tmp[index]; | |
432 | ||
433 | /* Sanity check on number of chunks */ | |
434 | num_tbs = iwl_tfd_get_num_tbs(tfd); | |
435 | ||
436 | if (num_tbs >= IWL_NUM_OF_TBS) { | |
437 | IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); | |
438 | /* @todo issue fatal error, it is quite a serious situation */ | |
439 | return; | |
440 | } | |
441 | ||
442 | /* Unmap tx_cmd */ | |
443 | if (num_tbs) | |
444 | pci_unmap_single(dev, | |
445 | pci_unmap_addr(&txq->meta[index], mapping), | |
446 | pci_unmap_len(&txq->meta[index], len), | |
447 | PCI_DMA_BIDIRECTIONAL); | |
448 | ||
449 | /* Unmap chunks, if any. */ | |
450 | for (i = 1; i < num_tbs; i++) { | |
451 | pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), | |
452 | iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); | |
453 | ||
454 | if (txq->txb) { | |
455 | dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]); | |
456 | txq->txb[txq->q.read_ptr].skb[i - 1] = NULL; | |
457 | } | |
458 | } | |
459 | } | |
460 | ||
461 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, | |
462 | struct iwl_tx_queue *txq, | |
463 | dma_addr_t addr, u16 len, | |
464 | u8 reset, u8 pad) | |
465 | { | |
466 | struct iwl_queue *q; | |
467 | struct iwl_tfd *tfd, *tfd_tmp; | |
468 | u32 num_tbs; | |
469 | ||
470 | q = &txq->q; | |
471 | tfd_tmp = (struct iwl_tfd *)txq->tfds; | |
472 | tfd = &tfd_tmp[q->write_ptr]; | |
473 | ||
474 | if (reset) | |
475 | memset(tfd, 0, sizeof(*tfd)); | |
476 | ||
477 | num_tbs = iwl_tfd_get_num_tbs(tfd); | |
478 | ||
479 | /* Each TFD can point to a maximum of 20 Tx buffers */ | |
480 | if (num_tbs >= IWL_NUM_OF_TBS) { | |
481 | IWL_ERR(priv, "Error can not send more than %d chunks\n", | |
482 | IWL_NUM_OF_TBS); | |
483 | return -EINVAL; | |
484 | } | |
485 | ||
486 | BUG_ON(addr & ~DMA_BIT_MASK(36)); | |
487 | if (unlikely(addr & ~IWL_TX_DMA_MASK)) | |
488 | IWL_ERR(priv, "Unaligned address = %llx\n", | |
489 | (unsigned long long)addr); | |
490 | ||
491 | iwl_tfd_set_tb(tfd, num_tbs, addr, len); | |
492 | ||
493 | return 0; | |
494 | } | |
495 | ||
496 | /* | |
497 | * Tell the NIC where to find the circular buffer of Tx Frame Descriptors for | |
498 | * given Tx queue, and enable the DMA channel used for that queue. | |
499 | * | |
500 | * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA | |
501 | * channels supported in hardware. | |
502 | */ | |
503 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, | |
504 | struct iwl_tx_queue *txq) | |
505 | { | |
506 | int txq_id = txq->q.id; | |
507 | ||
508 | /* Circular buffer (TFD queue in DRAM) physical base address */ | |
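| /* Annotation (assumption inferred from the >> 8 below): the register holds | |
|  * only address bits 35:8, so the TFD circular buffer is expected to be at | |
|  * least 256-byte aligned. */ | |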
509 | iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), | |
510 | txq->q.dma_addr >> 8); | |
511 | ||
512 | return 0; | |
513 | } | |
514 | ||
515 | /****************************************************************************** | |
516 | * | |
517 | * Generic RX handler implementations | |
518 | * | |
519 | ******************************************************************************/ | |
520 | static void iwl_rx_reply_alive(struct iwl_priv *priv, | |
521 | struct iwl_rx_mem_buffer *rxb) | |
522 | { | |
523 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | |
524 | struct iwl_alive_resp *palive; | |
525 | struct delayed_work *pwork; | |
526 | ||
527 | palive = &pkt->u.alive_frame; | |
528 | ||
529 | IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " | |
530 | "0x%01X 0x%01X\n", | |
531 | palive->is_valid, palive->ver_type, | |
532 | palive->ver_subtype); | |
533 | ||
534 | if (palive->ver_subtype == INITIALIZE_SUBTYPE) { | |
535 | IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); | |
536 | memcpy(&priv->card_alive_init, | |
537 | &pkt->u.alive_frame, | |
538 | sizeof(struct iwl_init_alive_resp)); | |
539 | pwork = &priv->init_alive_start; | |
540 | } else { | |
541 | IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); | |
542 | memcpy(&priv->card_alive, &pkt->u.alive_frame, | |
543 | sizeof(struct iwl_alive_resp)); | |
544 | pwork = &priv->alive_start; | |
545 | } | |
546 | ||
547 | /* We delay the ALIVE response by 5ms to | |
548 | * give the HW RF Kill time to activate... */ | |
549 | if (palive->is_valid == UCODE_VALID_OK) | |
550 | queue_delayed_work(priv->workqueue, pwork, | |
551 | msecs_to_jiffies(5)); | |
552 | else | |
553 | IWL_WARN(priv, "uCode did not respond OK.\n"); | |
554 | } | |
555 | ||
556 | static void iwl_bg_beacon_update(struct work_struct *work) | |
557 | { | |
558 | struct iwl_priv *priv = | |
559 | container_of(work, struct iwl_priv, beacon_update); | |
560 | struct sk_buff *beacon; | |
561 | ||
562 | /* Pull updated AP beacon from mac80211. Will fail if not in AP mode. */ | |
563 | beacon = ieee80211_beacon_get(priv->hw, priv->vif); | |
564 | ||
565 | if (!beacon) { | |
566 | IWL_ERR(priv, "update beacon failed\n"); | |
567 | return; | |
568 | } | |
569 | ||
570 | mutex_lock(&priv->mutex); | |
571 | /* new beacon skb is allocated every time; dispose of the previous one. */ | |
572 | if (priv->ibss_beacon) | |
573 | dev_kfree_skb(priv->ibss_beacon); | |
574 | ||
575 | priv->ibss_beacon = beacon; | |
576 | mutex_unlock(&priv->mutex); | |
577 | ||
578 | iwl_send_beacon_cmd(priv); | |
579 | } | |
580 | ||
581 | /** | |
582 | * iwl_bg_statistics_periodic - Timer callback to queue statistics | |
583 | * | |
584 | * This callback is provided in order to send a statistics request. | |
585 | * | |
586 | * This timer function is continually reset to execute within | |
587 | * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION | |
588 | * was received. We need to ensure we receive the statistics in order | |
589 | * to update the temperature used for calibrating the TXPOWER. | |
590 | */ | |
591 | static void iwl_bg_statistics_periodic(unsigned long data) | |
592 | { | |
593 | struct iwl_priv *priv = (struct iwl_priv *)data; | |
594 | ||
595 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
596 | return; | |
597 | ||
598 | /* don't send host commands if RF-kill is on */ | |
599 | if (!iwl_is_ready_rf(priv)) | |
600 | return; | |
601 | ||
602 | iwl_send_statistics_request(priv, CMD_ASYNC); | |
603 | } | |
604 | ||
605 | static void iwl_rx_beacon_notif(struct iwl_priv *priv, | |
606 | struct iwl_rx_mem_buffer *rxb) | |
607 | { | |
608 | #ifdef CONFIG_IWLWIFI_DEBUG | |
609 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | |
610 | struct iwl4965_beacon_notif *beacon = | |
611 | (struct iwl4965_beacon_notif *)pkt->u.raw; | |
612 | u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); | |
613 | ||
614 | IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " | |
615 | "tsf %d %d rate %d\n", | |
616 | le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, | |
617 | beacon->beacon_notify_hdr.failure_frame, | |
618 | le32_to_cpu(beacon->ibss_mgr_status), | |
619 | le32_to_cpu(beacon->high_tsf), | |
620 | le32_to_cpu(beacon->low_tsf), rate); | |
621 | #endif | |
622 | ||
623 | if ((priv->iw_mode == NL80211_IFTYPE_AP) && | |
624 | (!test_bit(STATUS_EXIT_PENDING, &priv->status))) | |
625 | queue_work(priv->workqueue, &priv->beacon_update); | |
626 | } | |
627 | ||
628 | /* Handle notification from uCode that card's power state is changing | |
629 | * due to software, hardware, or critical temperature RFKILL */ | |
630 | static void iwl_rx_card_state_notif(struct iwl_priv *priv, | |
631 | struct iwl_rx_mem_buffer *rxb) | |
632 | { | |
633 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | |
634 | u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); | |
635 | unsigned long status = priv->status; | |
636 | ||
637 | IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n", | |
638 | (flags & HW_CARD_DISABLED) ? "Kill" : "On", | |
639 | (flags & SW_CARD_DISABLED) ? "Kill" : "On"); | |
640 | ||
641 | if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | | |
642 | RF_CARD_DISABLED)) { | |
643 | ||
644 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | |
645 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | |
646 | ||
647 | iwl_write_direct32(priv, HBUS_TARG_MBX_C, | |
648 | HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); | |
649 | ||
650 | if (!(flags & RXON_CARD_DISABLED)) { | |
651 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | |
652 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | |
653 | iwl_write_direct32(priv, HBUS_TARG_MBX_C, | |
654 | HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); | |
655 | } | |
656 | if (flags & RF_CARD_DISABLED) | |
657 | iwl_tt_enter_ct_kill(priv); | |
658 | } | |
659 | if (!(flags & RF_CARD_DISABLED)) | |
660 | iwl_tt_exit_ct_kill(priv); | |
661 | ||
662 | if (flags & HW_CARD_DISABLED) | |
663 | set_bit(STATUS_RF_KILL_HW, &priv->status); | |
664 | else | |
665 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | |
666 | ||
667 | ||
668 | if (!(flags & RXON_CARD_DISABLED)) | |
669 | iwl_scan_cancel(priv); | |
670 | ||
671 | if ((test_bit(STATUS_RF_KILL_HW, &status) != | |
672 | test_bit(STATUS_RF_KILL_HW, &priv->status))) | |
673 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | |
674 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | |
675 | else | |
676 | wake_up_interruptible(&priv->wait_command_queue); | |
677 | } | |
678 | ||
679 | int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) | |
680 | { | |
681 | if (src == IWL_PWR_SRC_VAUX) { | |
682 | if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) | |
683 | iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, | |
684 | APMG_PS_CTRL_VAL_PWR_SRC_VAUX, | |
685 | ~APMG_PS_CTRL_MSK_PWR_SRC); | |
686 | } else { | |
687 | iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, | |
688 | APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, | |
689 | ~APMG_PS_CTRL_MSK_PWR_SRC); | |
690 | } | |
691 | ||
692 | return 0; | |
693 | } | |
694 | ||
695 | /** | |
696 | * iwl_setup_rx_handlers - Initialize Rx handler callbacks | |
697 | * | |
698 | * Setup the RX handlers for each of the reply types sent from the uCode | |
699 | * to the host. | |
700 | * | |
701 | * This function chains into the hardware specific files for them to setup | |
702 | * any hardware specific handlers as well. | |
703 | */ | |
704 | static void iwl_setup_rx_handlers(struct iwl_priv *priv) | |
705 | { | |
706 | priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; | |
707 | priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; | |
708 | priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; | |
709 | priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; | |
710 | priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = | |
711 | iwl_rx_pm_debug_statistics_notif; | |
712 | priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; | |
713 | ||
714 | /* | |
715 | * The same handler is used for both the REPLY to a discrete | |
716 | * statistics request from the host as well as for the periodic | |
717 | * statistics notifications (after received beacons) from the uCode. | |
718 | */ | |
719 | priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_rx_statistics; | |
720 | priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; | |
721 | ||
722 | iwl_setup_spectrum_handlers(priv); | |
723 | iwl_setup_rx_scan_handlers(priv); | |
724 | ||
725 | /* status change handler */ | |
726 | priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; | |
727 | ||
728 | priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = | |
729 | iwl_rx_missed_beacon_notif; | |
730 | /* Rx handlers */ | |
731 | priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; | |
732 | priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; | |
733 | /* block ack */ | |
734 | priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba; | |
735 | /* Set up hardware specific Rx handlers */ | |
736 | priv->cfg->ops->lib->rx_handler_setup(priv); | |
737 | } | |
738 | ||
739 | /** | |
740 | * iwl_rx_handle - Main entry function for receiving responses from uCode | |
741 | * | |
742 | * Uses the priv->rx_handlers callback function array to invoke | |
743 | * the appropriate handlers, including command responses, | |
744 | * frame-received notifications, and other notifications. | |
745 | */ | |
746 | void iwl_rx_handle(struct iwl_priv *priv) | |
747 | { | |
748 | struct iwl_rx_mem_buffer *rxb; | |
749 | struct iwl_rx_packet *pkt; | |
750 | struct iwl_rx_queue *rxq = &priv->rxq; | |
751 | u32 r, i; | |
752 | int reclaim; | |
753 | unsigned long flags; | |
754 | u8 fill_rx = 0; | |
755 | u32 count = 8; | |
756 | int total_empty; | |
757 | ||
758 | /* uCode's read index (stored in shared DRAM) indicates the last Rx | |
759 | * buffer that the driver may process (last buffer filled by ucode). */ | |
760 | r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; | |
761 | i = rxq->read; | |
762 | ||
763 | /* Rx interrupt, but nothing sent from uCode */ | |
764 | if (i == r) | |
765 | IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); | |
766 | ||
767 | /* calculate how many frames need to be restocked after handling RX */ | |
768 | total_empty = r - priv->rxq.write_actual; | |
769 | if (total_empty < 0) | |
770 | total_empty += RX_QUEUE_SIZE; | |
771 | ||
772 | if (total_empty > (RX_QUEUE_SIZE / 2)) | |
773 | fill_rx = 1; | |
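| /* Annotation (worked example, assuming RX_QUEUE_SIZE = 256): with r = 5 and | |
|  * write_actual = 250, r - write_actual = -245 wraps to total_empty = 11; | |
|  * since 11 <= 128 (half the ring), fill_rx stays 0 and restocking is | |
|  * deferred until after the loop below. */ | |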
774 | ||
775 | while (i != r) { | |
776 | rxb = rxq->queue[i]; | |
777 | ||
778 | /* If an RXB doesn't have a Rx queue slot associated with it, | |
779 | * then a bug has been introduced in the queue refilling | |
780 | * routines -- catch it here */ | |
781 | BUG_ON(rxb == NULL); | |
782 | ||
783 | rxq->queue[i] = NULL; | |
784 | ||
785 | pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, | |
786 | priv->hw_params.rx_buf_size + 256, | |
787 | PCI_DMA_FROMDEVICE); | |
788 | pkt = (struct iwl_rx_packet *)rxb->skb->data; | |
789 | ||
790 | /* Reclaim a command buffer only if this packet is a response | |
791 | * to a (driver-originated) command. | |
792 | * If the packet (e.g. Rx frame) originated from uCode, | |
793 | * there is no command buffer to reclaim. | |
794 | * uCode should set the SEQ_RX_FRAME bit if uCode-originated, | |
795 | * but apparently a few don't get set; catch them here. */ | |
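| /* Annotation (example): the response to a driver-issued REPLY_ADD_STA | |
|  * command is reclaimed by this logic, while an unsolicited | |
|  * REPLY_RX_MPDU_CMD frame or a STATISTICS_NOTIFICATION is not. */ | |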
796 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && | |
797 | (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && | |
798 | (pkt->hdr.cmd != REPLY_RX) && | |
799 | (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && | |
800 | (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && | |
801 | (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && | |
802 | (pkt->hdr.cmd != REPLY_TX); | |
803 | ||
804 | /* Based on type of command response or notification, | |
805 | * handle those that need handling via function in | |
806 | * rx_handlers table. See iwl_setup_rx_handlers() */ | |
807 | if (priv->rx_handlers[pkt->hdr.cmd]) { | |
808 | IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, | |
809 | i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); | |
810 | priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); | |
811 | priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; | |
812 | } else { | |
813 | /* No handling needed */ | |
814 | IWL_DEBUG_RX(priv, | |
815 | "r %d i %d No handler needed for %s, 0x%02x\n", | |
816 | r, i, get_cmd_string(pkt->hdr.cmd), | |
817 | pkt->hdr.cmd); | |
818 | } | |
819 | ||
820 | if (reclaim) { | |
821 | /* Invoke any callbacks, transfer the skb to caller, and | |
822 | * fire off the (possibly) blocking iwl_send_cmd() | |
823 | * as we reclaim the driver command queue */ | |
824 | if (rxb && rxb->skb) | |
825 | iwl_tx_cmd_complete(priv, rxb); | |
826 | else | |
827 | IWL_WARN(priv, "Claim null rxb?\n"); | |
828 | } | |
829 | ||
830 | /* For now we just don't re-use anything. We can tweak this | |
831 | * later to try and re-use notification packets and SKBs that | |
832 | * fail to Rx correctly */ | |
833 | if (rxb->skb != NULL) { | |
834 | priv->alloc_rxb_skb--; | |
835 | dev_kfree_skb_any(rxb->skb); | |
836 | rxb->skb = NULL; | |
837 | } | |
838 | ||
839 | spin_lock_irqsave(&rxq->lock, flags); | |
840 | list_add_tail(&rxb->list, &priv->rxq.rx_used); | |
841 | spin_unlock_irqrestore(&rxq->lock, flags); | |
842 | i = (i + 1) & RX_QUEUE_MASK; | |
843 | /* If there are a lot of unused frames, | |
844 | * restock the Rx queue so the uCode won't assert. */ | |
845 | if (fill_rx) { | |
846 | count++; | |
847 | if (count >= 8) { | |
848 | priv->rxq.read = i; | |
849 | iwl_rx_replenish_now(priv); | |
850 | count = 0; | |
851 | } | |
852 | } | |
853 | } | |
854 | ||
855 | /* Backtrack one entry */ | |
856 | priv->rxq.read = i; | |
857 | if (fill_rx) | |
858 | iwl_rx_replenish_now(priv); | |
859 | else | |
860 | iwl_rx_queue_restock(priv); | |
861 | } | |
862 | ||
863 | /* call this function to flush any scheduled tasklet */ | |
864 | static inline void iwl_synchronize_irq(struct iwl_priv *priv) | |
865 | { | |
866 | /* wait to make sure we flush the pending tasklet */ | |
867 | synchronize_irq(priv->pci_dev->irq); | |
868 | tasklet_kill(&priv->irq_tasklet); | |
869 | } | |
870 | ||
871 | static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) | |
872 | { | |
873 | u32 inta, handled = 0; | |
874 | u32 inta_fh; | |
875 | unsigned long flags; | |
876 | #ifdef CONFIG_IWLWIFI_DEBUG | |
877 | u32 inta_mask; | |
878 | #endif | |
879 | ||
880 | spin_lock_irqsave(&priv->lock, flags); | |
881 | ||
882 | /* Ack/clear/reset pending uCode interrupts. | |
883 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | |
884 | * and will clear only when CSR_FH_INT_STATUS gets cleared. */ | |
885 | inta = iwl_read32(priv, CSR_INT); | |
886 | iwl_write32(priv, CSR_INT, inta); | |
887 | ||
888 | /* Ack/clear/reset pending flow-handler (DMA) interrupts. | |
889 | * Any new interrupts that happen after this, either while we're | |
890 | * in this tasklet, or later, will show up in next ISR/tasklet. */ | |
891 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | |
892 | iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); | |
893 | ||
894 | #ifdef CONFIG_IWLWIFI_DEBUG | |
895 | if (iwl_get_debug_level(priv) & IWL_DL_ISR) { | |
896 | /* just for debug */ | |
897 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | |
898 | IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", | |
899 | inta, inta_mask, inta_fh); | |
900 | } | |
901 | #endif | |
902 | ||
903 | /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not | |
904 | * atomic, make sure that inta covers all the interrupts that | |
905 | * we've discovered, even if FH interrupt came in just after | |
906 | * reading CSR_INT. */ | |
907 | if (inta_fh & CSR49_FH_INT_RX_MASK) | |
908 | inta |= CSR_INT_BIT_FH_RX; | |
909 | if (inta_fh & CSR49_FH_INT_TX_MASK) | |
910 | inta |= CSR_INT_BIT_FH_TX; | |
911 | ||
912 | /* Now service all interrupt bits discovered above. */ | |
913 | if (inta & CSR_INT_BIT_HW_ERR) { | |
914 | IWL_ERR(priv, "Hardware error detected. Restarting.\n"); | |
915 | ||
916 | /* Tell the device to stop sending interrupts */ | |
917 | iwl_disable_interrupts(priv); | |
918 | ||
919 | priv->isr_stats.hw++; | |
920 | iwl_irq_handle_error(priv); | |
921 | ||
922 | handled |= CSR_INT_BIT_HW_ERR; | |
923 | ||
924 | spin_unlock_irqrestore(&priv->lock, flags); | |
925 | ||
926 | return; | |
927 | } | |
928 | ||
929 | #ifdef CONFIG_IWLWIFI_DEBUG | |
930 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | |
931 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | |
932 | if (inta & CSR_INT_BIT_SCD) { | |
933 | IWL_DEBUG_ISR(priv, "Scheduler finished transmitting " | |
934 | "the frame/frames.\n"); | |
935 | priv->isr_stats.sch++; | |
936 | } | |
937 | ||
938 | /* Alive notification via Rx interrupt will do the real work */ | |
939 | if (inta & CSR_INT_BIT_ALIVE) { | |
940 | IWL_DEBUG_ISR(priv, "Alive interrupt\n"); | |
941 | priv->isr_stats.alive++; | |
942 | } | |
943 | } | |
944 | #endif | |
945 | /* Safely ignore these bits for debug checks below */ | |
946 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | |
947 | ||
948 | /* HW RF KILL switch toggled */ | |
949 | if (inta & CSR_INT_BIT_RF_KILL) { | |
950 | int hw_rf_kill = 0; | |
951 | if (!(iwl_read32(priv, CSR_GP_CNTRL) & | |
952 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) | |
953 | hw_rf_kill = 1; | |
954 | ||
955 | IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", | |
956 | hw_rf_kill ? "disable radio" : "enable radio"); | |
957 | ||
958 | priv->isr_stats.rfkill++; | |
959 | ||
960 | /* The driver only loads the uCode once, when setting the interface up. | |
961 | * The driver allows loading the uCode even if the radio | |
962 | * is killed, so update the killswitch state here. The | |
963 | * rfkill handler will take care of restarting if needed. | |
964 | */ | |
965 | if (!test_bit(STATUS_ALIVE, &priv->status)) { | |
966 | if (hw_rf_kill) | |
967 | set_bit(STATUS_RF_KILL_HW, &priv->status); | |
968 | else | |
969 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | |
970 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); | |
971 | } | |
972 | ||
973 | handled |= CSR_INT_BIT_RF_KILL; | |
974 | } | |
975 | ||
976 | /* Chip got too hot and stopped itself */ | |
977 | if (inta & CSR_INT_BIT_CT_KILL) { | |
978 | IWL_ERR(priv, "Microcode CT kill error detected.\n"); | |
979 | priv->isr_stats.ctkill++; | |
980 | handled |= CSR_INT_BIT_CT_KILL; | |
981 | } | |
982 | ||
983 | /* Error detected by uCode */ | |
984 | if (inta & CSR_INT_BIT_SW_ERR) { | |
985 | IWL_ERR(priv, "Microcode SW error detected. " | |
986 | " Restarting 0x%X.\n", inta); | |
987 | priv->isr_stats.sw++; | |
988 | priv->isr_stats.sw_err = inta; | |
989 | iwl_irq_handle_error(priv); | |
990 | handled |= CSR_INT_BIT_SW_ERR; | |
991 | } | |
992 | ||
993 | /* uCode wakes up after power-down sleep */ | |
994 | if (inta & CSR_INT_BIT_WAKEUP) { | |
995 | IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); | |
996 | iwl_rx_queue_update_write_ptr(priv, &priv->rxq); | |
997 | iwl_txq_update_write_ptr(priv, &priv->txq[0]); | |
998 | iwl_txq_update_write_ptr(priv, &priv->txq[1]); | |
999 | iwl_txq_update_write_ptr(priv, &priv->txq[2]); | |
1000 | iwl_txq_update_write_ptr(priv, &priv->txq[3]); | |
1001 | iwl_txq_update_write_ptr(priv, &priv->txq[4]); | |
1002 | iwl_txq_update_write_ptr(priv, &priv->txq[5]); | |
1003 | ||
1004 | priv->isr_stats.wakeup++; | |
1005 | ||
1006 | handled |= CSR_INT_BIT_WAKEUP; | |
1007 | } | |
1008 | ||
1009 | /* All uCode command responses, including Tx command responses, | |
1010 | * Rx "responses" (frame-received notification), and other | |
1011 | * notifications from uCode come through here*/ | |
1012 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | |
1013 | iwl_rx_handle(priv); | |
1014 | priv->isr_stats.rx++; | |
1015 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | |
1016 | } | |
1017 | ||
1018 | if (inta & CSR_INT_BIT_FH_TX) { | |
1019 | IWL_DEBUG_ISR(priv, "Tx interrupt\n"); | |
1020 | priv->isr_stats.tx++; | |
1021 | handled |= CSR_INT_BIT_FH_TX; | |
1022 | /* FH finished writing, send event */ | |
1023 | priv->ucode_write_complete = 1; | |
1024 | wake_up_interruptible(&priv->wait_command_queue); | |
1025 | } | |
1026 | ||
1027 | if (inta & ~handled) { | |
1028 | IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); | |
1029 | priv->isr_stats.unhandled++; | |
1030 | } | |
1031 | ||
1032 | if (inta & ~(priv->inta_mask)) { | |
1033 | IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", | |
1034 | inta & ~priv->inta_mask); | |
1035 | IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); | |
1036 | } | |
1037 | ||
1038 | /* Re-enable all interrupts */ | |
1039 | /* only re-enable if disabled by irq */ | |
1040 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | |
1041 | iwl_enable_interrupts(priv); | |
1042 | ||
1043 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1044 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | |
1045 | inta = iwl_read32(priv, CSR_INT); | |
1046 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | |
1047 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | |
1048 | IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " | |
1049 | "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); | |
1050 | } | |
1051 | #endif | |
1052 | spin_unlock_irqrestore(&priv->lock, flags); | |
1053 | } | |
1054 | ||
1055 | /* tasklet for iwlagn interrupt */ | |
1056 | static void iwl_irq_tasklet(struct iwl_priv *priv) | |
1057 | { | |
1058 | u32 inta = 0; | |
1059 | u32 handled = 0; | |
1060 | unsigned long flags; | |
1061 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1062 | u32 inta_mask; | |
1063 | #endif | |
1064 | ||
1065 | spin_lock_irqsave(&priv->lock, flags); | |
1066 | ||
1067 | /* Ack/clear/reset pending uCode interrupts. | |
1068 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS. | |
1069 | */ | |
1070 | iwl_write32(priv, CSR_INT, priv->inta); | |
1071 | ||
1072 | inta = priv->inta; | |
1073 | ||
1074 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1075 | if (iwl_get_debug_level(priv) & IWL_DL_ISR) { | |
1076 | /* just for debug */ | |
1077 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | |
1078 | IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ", | |
1079 | inta, inta_mask); | |
1080 | } | |
1081 | #endif | |
1082 | /* interrupt saved in the inta variable; now we can reset priv->inta */ | |
1083 | priv->inta = 0; | |
1084 | ||
1085 | /* Now service all interrupt bits discovered above. */ | |
1086 | if (inta & CSR_INT_BIT_HW_ERR) { | |
1087 | IWL_ERR(priv, "Hardware error detected. Restarting.\n"); | |
1088 | ||
1089 | /* Tell the device to stop sending interrupts */ | |
1090 | iwl_disable_interrupts(priv); | |
1091 | ||
1092 | priv->isr_stats.hw++; | |
1093 | iwl_irq_handle_error(priv); | |
1094 | ||
1095 | handled |= CSR_INT_BIT_HW_ERR; | |
1096 | ||
1097 | spin_unlock_irqrestore(&priv->lock, flags); | |
1098 | ||
1099 | return; | |
1100 | } | |
1101 | ||
1102 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1103 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | |
1104 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | |
1105 | if (inta & CSR_INT_BIT_SCD) { | |
1106 | IWL_DEBUG_ISR(priv, "Scheduler finished transmitting " | |
1107 | "the frame/frames.\n"); | |
1108 | priv->isr_stats.sch++; | |
1109 | } | |
1110 | ||
1111 | /* Alive notification via Rx interrupt will do the real work */ | |
1112 | if (inta & CSR_INT_BIT_ALIVE) { | |
1113 | IWL_DEBUG_ISR(priv, "Alive interrupt\n"); | |
1114 | priv->isr_stats.alive++; | |
1115 | } | |
1116 | } | |
1117 | #endif | |
1118 | /* Safely ignore these bits for debug checks below */ | |
1119 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | |
1120 | ||
1121 | /* HW RF KILL switch toggled */ | |
1122 | if (inta & CSR_INT_BIT_RF_KILL) { | |
1123 | int hw_rf_kill = 0; | |
1124 | if (!(iwl_read32(priv, CSR_GP_CNTRL) & | |
1125 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) | |
1126 | hw_rf_kill = 1; | |
1127 | ||
1128 | IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", | |
1129 | hw_rf_kill ? "disable radio" : "enable radio"); | |
1130 | ||
1131 | priv->isr_stats.rfkill++; | |
1132 | ||
1133 | /* The driver only loads the uCode once, when setting the interface up. | |
1134 | * The driver allows loading the uCode even if the radio | |
1135 | * is killed, so update the killswitch state here. The | |
1136 | * rfkill handler will take care of restarting if needed. | |
1137 | */ | |
1138 | if (!test_bit(STATUS_ALIVE, &priv->status)) { | |
1139 | if (hw_rf_kill) | |
1140 | set_bit(STATUS_RF_KILL_HW, &priv->status); | |
1141 | else | |
1142 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | |
1143 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); | |
1144 | } | |
1145 | ||
1146 | handled |= CSR_INT_BIT_RF_KILL; | |
1147 | } | |
1148 | ||
1149 | /* Chip got too hot and stopped itself */ | |
1150 | if (inta & CSR_INT_BIT_CT_KILL) { | |
1151 | IWL_ERR(priv, "Microcode CT kill error detected.\n"); | |
1152 | priv->isr_stats.ctkill++; | |
1153 | handled |= CSR_INT_BIT_CT_KILL; | |
1154 | } | |
1155 | ||
1156 | /* Error detected by uCode */ | |
1157 | if (inta & CSR_INT_BIT_SW_ERR) { | |
1158 | IWL_ERR(priv, "Microcode SW error detected. " | |
1159 | " Restarting 0x%X.\n", inta); | |
1160 | priv->isr_stats.sw++; | |
1161 | priv->isr_stats.sw_err = inta; | |
1162 | iwl_irq_handle_error(priv); | |
1163 | handled |= CSR_INT_BIT_SW_ERR; | |
1164 | } | |
1165 | ||
1166 | /* uCode wakes up after power-down sleep */ | |
1167 | if (inta & CSR_INT_BIT_WAKEUP) { | |
1168 | IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); | |
1169 | iwl_rx_queue_update_write_ptr(priv, &priv->rxq); | |
1170 | iwl_txq_update_write_ptr(priv, &priv->txq[0]); | |
1171 | iwl_txq_update_write_ptr(priv, &priv->txq[1]); | |
1172 | iwl_txq_update_write_ptr(priv, &priv->txq[2]); | |
1173 | iwl_txq_update_write_ptr(priv, &priv->txq[3]); | |
1174 | iwl_txq_update_write_ptr(priv, &priv->txq[4]); | |
1175 | iwl_txq_update_write_ptr(priv, &priv->txq[5]); | |
1176 | ||
1177 | priv->isr_stats.wakeup++; | |
1178 | ||
1179 | handled |= CSR_INT_BIT_WAKEUP; | |
1180 | } | |
1181 | ||
1182 | /* All uCode command responses, including Tx command responses, | |
1183 | * Rx "responses" (frame-received notification), and other | |
1184 | * notifications from uCode come through here*/ | |
1185 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | | |
1186 | CSR_INT_BIT_RX_PERIODIC)) { | |
1187 | IWL_DEBUG_ISR(priv, "Rx interrupt\n"); | |
1188 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | |
1189 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | |
1190 | iwl_write32(priv, CSR_FH_INT_STATUS, | |
1191 | CSR49_FH_INT_RX_MASK); | |
1192 | } | |
1193 | if (inta & CSR_INT_BIT_RX_PERIODIC) { | |
1194 | handled |= CSR_INT_BIT_RX_PERIODIC; | |
1195 | iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC); | |
1196 | } | |
1197 | /* Sending an RX interrupt requires many steps to be done in the | |
1198 | * device: | |
1199 | * 1- write the interrupt to the current index in the ICT table. | |
1200 | * 2- DMA the RX frame. | |
1201 | * 3- update the RX shared data to indicate the last write index. | |
1202 | * 4- send the interrupt. | |
1203 | * This could lead to an RX race: the driver could receive the RX | |
1204 | * interrupt before the shared data changes are visible to it. | |
1205 | * The RX periodic interrupt solves this race. | |
1206 | */ | |
1207 | iwl_write32(priv, CSR_INT_PERIODIC_REG, | |
1208 | CSR_INT_PERIODIC_DIS); | |
1209 | iwl_rx_handle(priv); | |
1210 | /* Only set RX periodic if real RX is received. */ | |
1211 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) | |
1212 | iwl_write32(priv, CSR_INT_PERIODIC_REG, | |
1213 | CSR_INT_PERIODIC_ENA); | |
1214 | ||
1215 | priv->isr_stats.rx++; | |
1216 | } | |
1217 | ||
1218 | if (inta & CSR_INT_BIT_FH_TX) { | |
1219 | iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK); | |
1220 | IWL_DEBUG_ISR(priv, "Tx interrupt\n"); | |
1221 | priv->isr_stats.tx++; | |
1222 | handled |= CSR_INT_BIT_FH_TX; | |
1223 | /* FH finished writing, send event */ | |
1224 | priv->ucode_write_complete = 1; | |
1225 | wake_up_interruptible(&priv->wait_command_queue); | |
1226 | } | |
1227 | ||
1228 | if (inta & ~handled) { | |
1229 | IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); | |
1230 | priv->isr_stats.unhandled++; | |
1231 | } | |
1232 | ||
1233 | if (inta & ~(priv->inta_mask)) { | |
1234 | IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", | |
1235 | inta & ~priv->inta_mask); | |
1236 | } | |
1237 | ||
1238 | ||
1239 | /* Re-enable all interrupts */ | |
1240 | /* only re-enable if disabled by irq */ | |
1241 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | |
1242 | iwl_enable_interrupts(priv); | |
1243 | ||
1244 | spin_unlock_irqrestore(&priv->lock, flags); | |
1245 | ||
1246 | } | |
1247 | ||
1248 | ||
1249 | /****************************************************************************** | |
1250 | * | |
1251 | * uCode download functions | |
1252 | * | |
1253 | ******************************************************************************/ | |
1254 | ||
1255 | static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) | |
1256 | { | |
1257 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); | |
1258 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); | |
1259 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); | |
1260 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); | |
1261 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); | |
1262 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); | |
1263 | } | |
1264 | ||
1265 | static void iwl_nic_start(struct iwl_priv *priv) | |
1266 | { | |
1267 | /* Remove all resets to allow NIC to operate */ | |
1268 | iwl_write32(priv, CSR_RESET, 0); | |
1269 | } | |
1270 | ||
1271 | ||
1272 | /** | |
1273 | * iwl_read_ucode - Read uCode images from disk file. | |
1274 | * | |
1275 | * Copy into buffers for card to fetch via bus-mastering | |
1276 | */ | |
1277 | static int iwl_read_ucode(struct iwl_priv *priv) | |
1278 | { | |
1279 | struct iwl_ucode_header *ucode; | |
1280 | int ret = -EINVAL, index; | |
1281 | const struct firmware *ucode_raw; | |
1282 | const char *name_pre = priv->cfg->fw_name_pre; | |
1283 | const unsigned int api_max = priv->cfg->ucode_api_max; | |
1284 | const unsigned int api_min = priv->cfg->ucode_api_min; | |
1285 | char buf[25]; | |
1286 | u8 *src; | |
1287 | size_t len; | |
1288 | u32 api_ver, build; | |
1289 | u32 inst_size, data_size, init_size, init_data_size, boot_size; | |
1290 | u16 eeprom_ver; | |
1291 | ||
1292 | /* Ask kernel firmware_class module to get the boot firmware off disk. | |
1293 | * request_firmware() is synchronous, file is in memory on return. */ | |
1294 | for (index = api_max; index >= api_min; index--) { | |
1295 | sprintf(buf, "%s%d%s", name_pre, index, ".ucode"); | |
1296 | ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); | |
1297 | if (ret < 0) { | |
1298 | IWL_ERR(priv, "%s firmware file req failed: %d\n", | |
1299 | buf, ret); | |
1300 | if (ret == -ENOENT) | |
1301 | continue; | |
1302 | else | |
1303 | goto error; | |
1304 | } else { | |
1305 | if (index < api_max) | |
1306 | IWL_ERR(priv, "Loaded firmware %s, " | |
1307 | "which is deprecated. " | |
1308 | "Please use API v%u instead.\n", | |
1309 | buf, api_max); | |
1310 | ||
1311 | IWL_DEBUG_INFO(priv, "Got firmware '%s' file (%zd bytes) from disk\n", | |
1312 | buf, ucode_raw->size); | |
1313 | break; | |
1314 | } | |
1315 | } | |
1316 | ||
1317 | if (ret < 0) | |
1318 | goto error; | |
1319 | ||
1320 | /* Make sure that we got at least the v1 header! */ | |
1321 | if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) { | |
1322 | IWL_ERR(priv, "File size way too small!\n"); | |
1323 | ret = -EINVAL; | |
1324 | goto err_release; | |
1325 | } | |
1326 | ||
1327 | /* Data from ucode file: header followed by uCode images */ | |
1328 | ucode = (struct iwl_ucode_header *)ucode_raw->data; | |
1329 | ||
1330 | priv->ucode_ver = le32_to_cpu(ucode->ver); | |
1331 | api_ver = IWL_UCODE_API(priv->ucode_ver); | |
1332 | build = priv->cfg->ops->ucode->get_build(ucode, api_ver); | |
1333 | inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver); | |
1334 | data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver); | |
1335 | init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver); | |
1336 | init_data_size = | |
1337 | priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver); | |
1338 | boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver); | |
1339 | src = priv->cfg->ops->ucode->get_data(ucode, api_ver); | |
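| /* Annotation (illustrative sketch of the .ucode file layout parsed above; | |
|  * block names follow the header accessors, sizes vary per file): | |
|  *   header (version, build, five block sizes) | |
|  *   runtime instructions    inst_size bytes | |
|  *   runtime data            data_size bytes | |
|  *   init instructions       init_size bytes | |
|  *   init data               init_data_size bytes | |
|  *   bootstrap instructions  boot_size bytes | |
|  * The size check further below requires the file length to equal the | |
|  * header size plus the sum of these five block sizes. */ | |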
1340 | ||
1341 | /* api_ver should match the api version forming part of the | |
1342 | * firmware filename ... but we don't check for that and only rely | |
1343 | * on the API version read from the firmware header from here on */ | |
1344 | ||
1345 | if (api_ver < api_min || api_ver > api_max) { | |
1346 | IWL_ERR(priv, "Driver unable to support your firmware API. " | |
1347 | "Driver supports v%u, firmware is v%u.\n", | |
1348 | api_max, api_ver); | |
1349 | priv->ucode_ver = 0; | |
1350 | ret = -EINVAL; | |
1351 | goto err_release; | |
1352 | } | |
1353 | if (api_ver != api_max) | |
1354 | IWL_ERR(priv, "Firmware has old API version. Expected v%u, " | |
1355 | "got v%u. New firmware can be obtained " | |
1356 | "from http://www.intellinuxwireless.org.\n", | |
1357 | api_max, api_ver); | |
1358 | ||
1359 | IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", | |
1360 | IWL_UCODE_MAJOR(priv->ucode_ver), | |
1361 | IWL_UCODE_MINOR(priv->ucode_ver), | |
1362 | IWL_UCODE_API(priv->ucode_ver), | |
1363 | IWL_UCODE_SERIAL(priv->ucode_ver)); | |
1364 | ||
1365 | if (build) | |
1366 | IWL_DEBUG_INFO(priv, "Build %u\n", build); | |
1367 | ||
1368 | eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); | |
1369 | IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n", | |
1370 | (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) | |
1371 | ? "OTP" : "EEPROM", eeprom_ver); | |
1372 | ||
1373 | IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", | |
1374 | priv->ucode_ver); | |
1375 | IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", | |
1376 | inst_size); | |
1377 | IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", | |
1378 | data_size); | |
1379 | IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", | |
1380 | init_size); | |
1381 | IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", | |
1382 | init_data_size); | |
1383 | IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", | |
1384 | boot_size); | |
1385 | ||
1386 | /* Verify size of file vs. image size info in file's header */ | |
1387 | if (ucode_raw->size != | |
1388 | priv->cfg->ops->ucode->get_header_size(api_ver) + | |
1389 | inst_size + data_size + init_size + | |
1390 | init_data_size + boot_size) { | |
1391 | ||
1392 | IWL_DEBUG_INFO(priv, | |
1393 | "uCode file size %d does not match expected size\n", | |
1394 | (int)ucode_raw->size); | |
1395 | ret = -EINVAL; | |
1396 | goto err_release; | |
1397 | } | |
1398 | ||
1399 | /* Verify that uCode images will fit in card's SRAM */ | |
1400 | if (inst_size > priv->hw_params.max_inst_size) { | |
1401 | IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in SRAM\n", | |
1402 | inst_size); | |
1403 | ret = -EINVAL; | |
1404 | goto err_release; | |
1405 | } | |
1406 | ||
1407 | if (data_size > priv->hw_params.max_data_size) { | |
1408 | IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in SRAM\n", | |
1409 | data_size); | |
1410 | ret = -EINVAL; | |
1411 | goto err_release; | |
1412 | } | |
1413 | if (init_size > priv->hw_params.max_inst_size) { | |
1414 | IWL_INFO(priv, "uCode init instr len %d too large to fit in SRAM\n", | |
1415 | init_size); | |
1416 | ret = -EINVAL; | |
1417 | goto err_release; | |
1418 | } | |
1419 | if (init_data_size > priv->hw_params.max_data_size) { | |
1420 | IWL_INFO(priv, "uCode init data len %d too large to fit in SRAM\n", | |
1421 | init_data_size); | |
1422 | ret = -EINVAL; | |
1423 | goto err_release; | |
1424 | } | |
1425 | if (boot_size > priv->hw_params.max_bsm_size) { | |
1426 | IWL_INFO(priv, "uCode boot instr len %d too large to fit in SRAM\n", | |
1427 | boot_size); | |
1428 | ret = -EINVAL; | |
1429 | goto err_release; | |
1430 | } | |
1431 | ||
1432 | /* Allocate ucode buffers for card's bus-master loading ... */ | |
1433 | ||
1434 | /* Runtime instructions and 2 copies of data: | |
1435 | * 1) unmodified from disk | |
1436 | * 2) backup cache for save/restore during power-downs */ | |
1437 | priv->ucode_code.len = inst_size; | |
1438 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); | |
1439 | ||
1440 | priv->ucode_data.len = data_size; | |
1441 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); | |
1442 | ||
1443 | priv->ucode_data_backup.len = data_size; | |
1444 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); | |
1445 | ||
1446 | if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || | |
1447 | !priv->ucode_data_backup.v_addr) | |
1448 | goto err_pci_alloc; | |
1449 | ||
1450 | /* Initialization instructions and data */ | |
1451 | if (init_size && init_data_size) { | |
1452 | priv->ucode_init.len = init_size; | |
1453 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); | |
1454 | ||
1455 | priv->ucode_init_data.len = init_data_size; | |
1456 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); | |
1457 | ||
1458 | if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) | |
1459 | goto err_pci_alloc; | |
1460 | } | |
1461 | ||
1462 | /* Bootstrap (instructions only, no data) */ | |
1463 | if (boot_size) { | |
1464 | priv->ucode_boot.len = boot_size; | |
1465 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); | |
1466 | ||
1467 | if (!priv->ucode_boot.v_addr) | |
1468 | goto err_pci_alloc; | |
1469 | } | |
1470 | ||
1471 | /* Copy images into buffers for card's bus-master reads ... */ | |
1472 | ||
1473 | /* Runtime instructions (first block of data in file) */ | |
1474 | len = inst_size; | |
1475 | IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len); | |
1476 | memcpy(priv->ucode_code.v_addr, src, len); | |
1477 | src += len; | |
1478 | ||
1479 | IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", | |
1480 | priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); | |
1481 | ||
1482 | /* Runtime data (2nd block) | |
1483 | * NOTE: Copy into backup buffer will be done in iwl_up() */ | |
1484 | len = data_size; | |
1485 | IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len); | |
1486 | memcpy(priv->ucode_data.v_addr, src, len); | |
1487 | memcpy(priv->ucode_data_backup.v_addr, src, len); | |
1488 | src += len; | |
1489 | ||
1490 | /* Initialization instructions (3rd block) */ | |
1491 | if (init_size) { | |
1492 | len = init_size; | |
1493 | IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n", | |
1494 | len); | |
1495 | memcpy(priv->ucode_init.v_addr, src, len); | |
1496 | src += len; | |
1497 | } | |
1498 | ||
1499 | /* Initialization data (4th block) */ | |
1500 | if (init_data_size) { | |
1501 | len = init_data_size; | |
1502 | IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n", | |
1503 | len); | |
1504 | memcpy(priv->ucode_init_data.v_addr, src, len); | |
1505 | src += len; | |
1506 | } | |
1507 | ||
1508 | /* Bootstrap instructions (5th block) */ | |
1509 | len = boot_size; | |
1510 | IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len); | |
1511 | memcpy(priv->ucode_boot.v_addr, src, len); | |
1512 | ||
1513 | /* We have our copies now, allow the OS to release its copy */ | |
1514 | release_firmware(ucode_raw); | |
1515 | return 0; | |
1516 | ||
1517 | err_pci_alloc: | |
1518 | IWL_ERR(priv, "failed to allocate pci memory\n"); | |
1519 | ret = -ENOMEM; | |
1520 | iwl_dealloc_ucode_pci(priv); | |
1521 | ||
1522 | err_release: | |
1523 | release_firmware(ucode_raw); | |
1524 | ||
1525 | error: | |
1526 | return ret; | |
1527 | } | |
1528 | ||
1529 | #ifdef CONFIG_IWLWIFI_DEBUG | |
1530 | static const char *desc_lookup_text[] = { | |
1531 | "OK", | |
1532 | "FAIL", | |
1533 | "BAD_PARAM", | |
1534 | "BAD_CHECKSUM", | |
1535 | "NMI_INTERRUPT_WDG", | |
1536 | "SYSASSERT", | |
1537 | "FATAL_ERROR", | |
1538 | "BAD_COMMAND", | |
1539 | "HW_ERROR_TUNE_LOCK", | |
1540 | "HW_ERROR_TEMPERATURE", | |
1541 | "ILLEGAL_CHAN_FREQ", | |
1542 | "VCC_NOT_STABLE", | |
1543 | "FH_ERROR", | |
1544 | "NMI_INTERRUPT_HOST", | |
1545 | "NMI_INTERRUPT_ACTION_PT", | |
1546 | "NMI_INTERRUPT_UNKNOWN", | |
1547 | "UCODE_VERSION_MISMATCH", | |
1548 | "HW_ERROR_ABS_LOCK", | |
1549 | "HW_ERROR_CAL_LOCK_FAIL", | |
1550 | "NMI_INTERRUPT_INST_ACTION_PT", | |
1551 | "NMI_INTERRUPT_DATA_ACTION_PT", | |
1552 | "NMI_TRM_HW_ER", | |
1553 | "NMI_INTERRUPT_TRM", | |
1554 | "NMI_INTERRUPT_BREAK_POINT" | |
1555 | "DEBUG_0", | |
1556 | "DEBUG_1", | |
1557 | "DEBUG_2", | |
1558 | "DEBUG_3", | |
1559 | "UNKNOWN" | |
1560 | }; | |
1561 | ||
1562 | static const char *desc_lookup(int i) | |
1563 | { | |
1564 | int max = ARRAY_SIZE(desc_lookup_text) - 1; | |
1565 | ||
1566 | if (i < 0 || i > max) | |
1567 | i = max; | |
1568 | ||
1569 | return desc_lookup_text[i]; | |
1570 | } | |
1571 | ||
1572 | #define ERROR_START_OFFSET (1 * sizeof(u32)) | |
1573 | #define ERROR_ELEM_SIZE (7 * sizeof(u32)) | |
1574 | ||
1575 | void iwl_dump_nic_error_log(struct iwl_priv *priv) | |
1576 | { | |
1577 | u32 data2, line; | |
1578 | u32 desc, time, count, base, data1; | |
1579 | u32 blink1, blink2, ilink1, ilink2; | |
1580 | ||
1581 | if (priv->ucode_type == UCODE_INIT) | |
1582 | base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); | |
1583 | else | |
1584 | base = le32_to_cpu(priv->card_alive.error_event_table_ptr); | |
1585 | ||
1586 | if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { | |
1587 | IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); | |
1588 | return; | |
1589 | } | |
1590 | ||
1591 | count = iwl_read_targ_mem(priv, base); | |
1592 | ||
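/* ERROR_START_OFFSET (4) <= count * ERROR_ELEM_SIZE (28) holds whenever
 * the table reports at least one record, so the header below is printed
 * only for a non-empty error log. */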
1593 | if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { | |
1594 | IWL_ERR(priv, "Start IWL Error Log Dump:\n"); | |
1595 | IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", | |
1596 | priv->status, count); | |
1597 | } | |
1598 | ||
1599 | desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); | |
1600 | blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); | |
1601 | blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); | |
1602 | ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); | |
1603 | ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32)); | |
1604 | data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32)); | |
1605 | data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); | |
1606 | line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); | |
1607 | time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); | |
1608 | ||
1609 | IWL_ERR(priv, "Desc Time " | |
1610 | "data1 data2 line\n"); | |
1611 | IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", | |
1612 | desc_lookup(desc), desc, time, data1, data2, line); | |
1613 | IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n"); | |
1614 | IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, | |
1615 | ilink1, ilink2); | |
1616 | ||
1617 | } | |
1618 | ||
1619 | #define EVENT_START_OFFSET (4 * sizeof(u32)) | |
1620 | ||
1621 | /** | |
1622 | * iwl_print_event_log - Dump error event log to syslog | |
1623 | * | |
1624 | */ | |
1625 | static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, | |
1626 | u32 num_events, u32 mode) | |
1627 | { | |
1628 | u32 i; | |
1629 | u32 base; /* SRAM byte address of event log header */ | |
1630 | u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ | |
1631 | u32 ptr; /* SRAM byte address of log data */ | |
1632 | u32 ev, time, data; /* event log data */ | |
1633 | ||
1634 | if (num_events == 0) | |
1635 | return; | |
1636 | if (priv->ucode_type == UCODE_INIT) | |
1637 | base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); | |
1638 | else | |
1639 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | |
1640 | ||
1641 | if (mode == 0) | |
1642 | event_size = 2 * sizeof(u32); | |
1643 | else | |
1644 | event_size = 3 * sizeof(u32); | |
1645 | ||
1646 | ptr = base + EVENT_START_OFFSET + (start_idx * event_size); | |
1647 | ||
1648 | /* "time" is actually "data" for mode 0 (no timestamp). | |
1649 | * place event id # at far right for easier visual parsing. */ | |
1650 | for (i = 0; i < num_events; i++) { | |
1651 | ev = iwl_read_targ_mem(priv, ptr); | |
1652 | ptr += sizeof(u32); | |
1653 | time = iwl_read_targ_mem(priv, ptr); | |
1654 | ptr += sizeof(u32); | |
1655 | if (mode == 0) { | |
1656 | /* data, ev */ | |
1657 | IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev); | |
1658 | } else { | |
1659 | data = iwl_read_targ_mem(priv, ptr); | |
1660 | ptr += sizeof(u32); | |
1661 | IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", | |
1662 | time, data, ev); | |
1663 | } | |
1664 | } | |
1665 | } | |
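/*
 * Worked example for iwl_print_event_log() above: with
 * EVENT_START_OFFSET = 16 bytes and mode 1 (3 u32s = 12 bytes per event),
 * event index 5 starts at base + 16 + 5 * 12 = base + 76; in mode 0
 * (8 bytes per event) the same index starts at base + 16 + 5 * 8 = base + 56.
 */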
1666 | ||
1667 | void iwl_dump_nic_event_log(struct iwl_priv *priv) | |
1668 | { | |
1669 | u32 base; /* SRAM byte address of event log header */ | |
1670 | u32 capacity; /* event log capacity in # entries */ | |
1671 | u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ | |
1672 | u32 num_wraps; /* # times uCode wrapped to top of log */ | |
1673 | u32 next_entry; /* index of next entry to be written by uCode */ | |
1674 | u32 size; /* # entries that we'll print */ | |
1675 | ||
1676 | if (priv->ucode_type == UCODE_INIT) | |
1677 | base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); | |
1678 | else | |
1679 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | |
1680 | ||
1681 | if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { | |
1682 | IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); | |
1683 | return; | |
1684 | } | |
1685 | ||
1686 | /* event log header */ | |
1687 | capacity = iwl_read_targ_mem(priv, base); | |
1688 | mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); | |
1689 | num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); | |
1690 | next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); | |
1691 | ||
1692 | size = num_wraps ? capacity : next_entry; | |
1693 | ||
1694 | /* bail out if nothing in log */ | |
1695 | if (size == 0) { | |
1696 | IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); | |
1697 | return; | |
1698 | } | |
1699 | ||
1700 | IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n", | |
1701 | size, num_wraps); | |
1702 | ||
1703 | /* if uCode has wrapped back to top of log, start at the oldest entry, | |
1704 | * i.e. the next one that uCode would fill. */ | |
1705 | if (num_wraps) | |
1706 | iwl_print_event_log(priv, next_entry, | |
1707 | capacity - next_entry, mode); | |
1708 | /* (then/else) start at top of log */ | |
1709 | iwl_print_event_log(priv, 0, next_entry, mode); | |
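/* Taken together, the two calls above print the log in chronological
 * order.  Example: with capacity = 128, next_entry = 20 and
 * num_wraps != 0, entries 20..127 (the oldest) are printed first,
 * followed by entries 0..19 (the newest). */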
1710 | ||
1711 | } | |
1712 | #endif | |
1713 | ||
1714 | /** | |
1715 | * iwl_alive_start - called after REPLY_ALIVE notification received | |
1716 | * from protocol/runtime uCode (initialization uCode's | |
1717 | * Alive gets handled by iwl_init_alive_start()). | |
1718 | */ | |
1719 | static void iwl_alive_start(struct iwl_priv *priv) | |
1720 | { | |
1721 | int ret = 0; | |
1722 | ||
1723 | IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); | |
1724 | ||
1725 | if (priv->card_alive.is_valid != UCODE_VALID_OK) { | |
1726 | /* We had an error bringing up the hardware, so take it | |
1727 | * all the way back down so we can try again */ | |
1728 | IWL_DEBUG_INFO(priv, "Alive failed.\n"); | |
1729 | goto restart; | |
1730 | } | |
1731 | ||
1732 | /* The "initialize" uCode has loaded the runtime uCode ... verify its | |
1733 | * instruction image.  This is a paranoid check: we would not have | |
1734 | * gotten the "runtime" alive notification if it weren't properly loaded. */ | |
1735 | if (iwl_verify_ucode(priv)) { | |
1736 | /* Runtime instruction load was bad; | |
1737 | * take it all the way back down so we can try again */ | |
1738 | IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); | |
1739 | goto restart; | |
1740 | } | |
1741 | ||
1742 | iwl_clear_stations_table(priv); | |
1743 | ret = priv->cfg->ops->lib->alive_notify(priv); | |
1744 | if (ret) { | |
1745 | IWL_WARN(priv, | |
1746 | "Could not complete ALIVE transition [ntf]: %d\n", ret); | |
1747 | goto restart; | |
1748 | } | |
1749 | ||
1750 | /* After the ALIVE response, we can send host commands to the uCode */ | |
1751 | set_bit(STATUS_ALIVE, &priv->status); | |
1752 | ||
1753 | if (iwl_is_rfkill(priv)) | |
1754 | return; | |
1755 | ||
1756 | ieee80211_wake_queues(priv->hw); | |
1757 | ||
1758 | priv->active_rate = priv->rates_mask; | |
1759 | priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; | |
1760 | ||
1761 | if (iwl_is_associated(priv)) { | |
1762 | struct iwl_rxon_cmd *active_rxon = | |
1763 | (struct iwl_rxon_cmd *)&priv->active_rxon; | |
1764 | /* apply any changes in staging */ | |
1765 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | |
1766 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | |
1767 | } else { | |
1768 | /* Initialize our rx_config data */ | |
1769 | iwl_connection_init_rx_config(priv, priv->iw_mode); | |
1770 | ||
1771 | if (priv->cfg->ops->hcmd->set_rxon_chain) | |
1772 | priv->cfg->ops->hcmd->set_rxon_chain(priv); | |
1773 | ||
1774 | memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); | |
1775 | } | |
1776 | ||
1777 | /* Configure Bluetooth device coexistence support */ | |
1778 | iwl_send_bt_config(priv); | |
1779 | ||
1780 | iwl_reset_run_time_calib(priv); | |
1781 | ||
1782 | /* Configure the adapter for unassociated operation */ | |
1783 | iwlcore_commit_rxon(priv); | |
1784 | ||
1785 | /* At this point, the NIC is initialized and operational */ | |
1786 | iwl_rf_kill_ct_config(priv); | |
1787 | ||
1788 | iwl_leds_register(priv); | |
1789 | ||
1790 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | |
1791 | set_bit(STATUS_READY, &priv->status); | |
1792 | wake_up_interruptible(&priv->wait_command_queue); | |
1793 | ||
1794 | iwl_power_update_mode(priv, true); | |
1795 | ||
1796 | /* reassociate for ADHOC mode */ | |
1797 | if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) { | |
1798 | struct sk_buff *beacon = ieee80211_beacon_get(priv->hw, | |
1799 | priv->vif); | |
1800 | if (beacon) | |
1801 | iwl_mac_beacon_update(priv->hw, beacon); | |
1802 | } | |
1803 | ||
1804 | ||
1805 | if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) | |
1806 | iwl_set_mode(priv, priv->iw_mode); | |
1807 | ||
1808 | return; | |
1809 | ||
1810 | restart: | |
1811 | queue_work(priv->workqueue, &priv->restart); | |
1812 | } | |
1813 | ||
1814 | static void iwl_cancel_deferred_work(struct iwl_priv *priv); | |
1815 | ||
1816 | static void __iwl_down(struct iwl_priv *priv) | |
1817 | { | |
1818 | unsigned long flags; | |
1819 | int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); | |
1820 | ||
1821 | IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); | |
1822 | ||
1823 | if (!exit_pending) | |
1824 | set_bit(STATUS_EXIT_PENDING, &priv->status); | |
1825 | ||
1826 | iwl_leds_unregister(priv); | |
1827 | ||
1828 | iwl_clear_stations_table(priv); | |
1829 | ||
1830 | /* Unblock any waiting calls */ | |
1831 | wake_up_interruptible_all(&priv->wait_command_queue); | |
1832 | ||
1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually | |
1834 | * exiting the module */ | |
1835 | if (!exit_pending) | |
1836 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | |
1837 | ||
1838 | /* stop and reset the on-board processor */ | |
1839 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | |
1840 | ||
1841 | /* tell the device to stop sending interrupts */ | |
1842 | spin_lock_irqsave(&priv->lock, flags); | |
1843 | iwl_disable_interrupts(priv); | |
1844 | spin_unlock_irqrestore(&priv->lock, flags); | |
1845 | iwl_synchronize_irq(priv); | |
1846 | ||
1847 | if (priv->mac80211_registered) | |
1848 | ieee80211_stop_queues(priv->hw); | |
1849 | ||
1850 | /* If we have not previously called iwl_init() then clear all status | |
1851 | * bits except RF Kill, GEO_CONFIGURED and EXIT_PENDING, and return */ | |
1852 | if (!iwl_is_init(priv)) { | |
1853 | priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << | |
1854 | STATUS_RF_KILL_HW | | |
1855 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << | |
1856 | STATUS_GEO_CONFIGURED | | |
1857 | test_bit(STATUS_EXIT_PENDING, &priv->status) << | |
1858 | STATUS_EXIT_PENDING; | |
1859 | goto exit; | |
1860 | } | |
1861 | ||
1862 | /* ...otherwise clear out all status bits except RF Kill, GEO_CONFIGURED, | |
1863 | * FW_ERROR and EXIT_PENDING, and continue taking the NIC down. */ | |
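/* (test_bit() returns 0 or 1, so shifting each result back to its bit
 * position and OR'ing them together rebuilds a status word that keeps
 * only the bits listed below.) */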
1864 | priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << | |
1865 | STATUS_RF_KILL_HW | | |
1866 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << | |
1867 | STATUS_GEO_CONFIGURED | | |
1868 | test_bit(STATUS_FW_ERROR, &priv->status) << | |
1869 | STATUS_FW_ERROR | | |
1870 | test_bit(STATUS_EXIT_PENDING, &priv->status) << | |
1871 | STATUS_EXIT_PENDING; | |
1872 | ||
1873 | /* Device going down, stop using the ICT table */ | |
1874 | iwl_disable_ict(priv); | |
1875 | spin_lock_irqsave(&priv->lock, flags); | |
1876 | iwl_clear_bit(priv, CSR_GP_CNTRL, | |
1877 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | |
1878 | spin_unlock_irqrestore(&priv->lock, flags); | |
1879 | ||
1880 | iwl_txq_ctx_stop(priv); | |
1881 | iwl_rxq_stop(priv); | |
1882 | ||
1883 | iwl_write_prph(priv, APMG_CLK_DIS_REG, | |
1884 | APMG_CLK_VAL_DMA_CLK_RQT); | |
1885 | ||
1886 | udelay(5); | |
1887 | ||
1888 | /* FIXME: apm_ops.suspend(priv) */ | |
1889 | if (exit_pending) | |
1890 | priv->cfg->ops->lib->apm_ops.stop(priv); | |
1891 | else | |
1892 | priv->cfg->ops->lib->apm_ops.reset(priv); | |
1893 | exit: | |
1894 | memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); | |
1895 | ||
1896 | if (priv->ibss_beacon) | |
1897 | dev_kfree_skb(priv->ibss_beacon); | |
1898 | priv->ibss_beacon = NULL; | |
1899 | ||
1900 | /* clear out any free frames */ | |
1901 | iwl_clear_free_frames(priv); | |
1902 | } | |
1903 | ||
1904 | static void iwl_down(struct iwl_priv *priv) | |
1905 | { | |
1906 | mutex_lock(&priv->mutex); | |
1907 | __iwl_down(priv); | |
1908 | mutex_unlock(&priv->mutex); | |
1909 | ||
1910 | iwl_cancel_deferred_work(priv); | |
1911 | } | |
1912 | ||
1913 | #define HW_READY_TIMEOUT (50) | |
1914 | ||
1915 | static int iwl_set_hw_ready(struct iwl_priv *priv) | |
1916 | { | |
1917 | int ret = 0; | |
1918 | ||
1919 | iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, | |
1920 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); | |
1921 | ||
1922 | /* See if we got it */ | |
1923 | ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, | |
1924 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | |
1925 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | |
1926 | HW_READY_TIMEOUT); | |
1927 | if (ret != -ETIMEDOUT) | |
1928 | priv->hw_ready = true; | |
1929 | else | |
1930 | priv->hw_ready = false; | |
1931 | ||
1932 | IWL_DEBUG_INFO(priv, "hardware %s\n", | |
1933 | (priv->hw_ready == 1) ? "ready" : "not ready"); | |
1934 | return ret; | |
1935 | } | |
1936 | ||
1937 | static int iwl_prepare_card_hw(struct iwl_priv *priv) | |
1938 | { | |
1939 | int ret = 0; | |
1940 | ||
1941 | IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n"); | |
1942 | ||
1943 | ret = iwl_set_hw_ready(priv); | |
1944 | if (priv->hw_ready) | |
1945 | return ret; | |
1946 | ||
1947 | /* If HW is not ready, prepare the conditions to check again */ | |
1948 | iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, | |
1949 | CSR_HW_IF_CONFIG_REG_PREPARE); | |
1950 | ||
1951 | ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, | |
1952 | ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, | |
1953 | CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); | |
1954 | ||
1955 | /* HW should be ready by now, check again. */ | |
1956 | if (ret != -ETIMEDOUT) | |
1957 | iwl_set_hw_ready(priv); | |
1958 | ||
1959 | return ret; | |
1960 | } | |
1961 | ||
1962 | #define MAX_HW_RESTARTS 5 | |
1963 | ||
1964 | static int __iwl_up(struct iwl_priv *priv) | |
1965 | { | |
1966 | int i; | |
1967 | int ret; | |
1968 | ||
1969 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | |
1970 | IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); | |
1971 | return -EIO; | |
1972 | } | |
1973 | ||
1974 | if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { | |
1975 | IWL_ERR(priv, "ucode not available for device bringup\n"); | |
1976 | return -EIO; | |
1977 | } | |
1978 | ||
1979 | iwl_prepare_card_hw(priv); | |
1980 | ||
1981 | if (!priv->hw_ready) { | |
1982 | IWL_WARN(priv, "Exit HW not ready\n"); | |
1983 | return -EIO; | |
1984 | } | |
1985 | ||
1986 | /* If platform's RF_KILL switch is NOT set to KILL */ | |
1987 | if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) | |
1988 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | |
1989 | else | |
1990 | set_bit(STATUS_RF_KILL_HW, &priv->status); | |
1991 | ||
1992 | if (iwl_is_rfkill(priv)) { | |
1993 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); | |
1994 | ||
1995 | iwl_enable_interrupts(priv); | |
1996 | IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); | |
1997 | return 0; | |
1998 | } | |
1999 | ||
2000 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | |
2001 | ||
2002 | ret = iwl_hw_nic_init(priv); | |
2003 | if (ret) { | |
2004 | IWL_ERR(priv, "Unable to init nic\n"); | |
2005 | return ret; | |
2006 | } | |
2007 | ||
2008 | /* make sure rfkill handshake bits are cleared */ | |
2009 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | |
2010 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | |
2011 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | |
2012 | ||
2013 | /* clear (again), then enable host interrupts */ | |
2014 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | |
2015 | iwl_enable_interrupts(priv); | |
2016 | ||
2017 | /* really make sure rfkill handshake bits are cleared */ | |
2018 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | |
2019 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | |
2020 | ||
2021 | /* Copy original ucode data image from disk into backup cache. | |
2022 | * This will be used to initialize the on-board processor's | |
2023 | * data SRAM for a clean start when the runtime program first loads. */ | |
2024 | memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, | |
2025 | priv->ucode_data.len); | |
2026 | ||
2027 | for (i = 0; i < MAX_HW_RESTARTS; i++) { | |
2028 | ||
2029 | iwl_clear_stations_table(priv); | |
2030 | ||
2031 | /* load bootstrap state machine, | |
2032 | * load bootstrap program into processor's memory, | |
2033 | * prepare to load the "initialize" uCode */ | |
2034 | ret = priv->cfg->ops->lib->load_ucode(priv); | |
2035 | ||
2036 | if (ret) { | |
2037 | IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", | |
2038 | ret); | |
2039 | continue; | |
2040 | } | |
2041 | ||
2042 | /* start card; "initialize" will load runtime ucode */ | |
2043 | iwl_nic_start(priv); | |
2044 | ||
2045 | IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); | |
2046 | ||
2047 | return 0; | |
2048 | } | |
2049 | ||
2050 | set_bit(STATUS_EXIT_PENDING, &priv->status); | |
2051 | __iwl_down(priv); | |
2052 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | |
2053 | ||
2054 | /* tried to restart and config the device for as long as our | |
2055 | * patience could withstand */ | |
2056 | IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); | |
2057 | return -EIO; | |
2058 | } | |
2059 | ||
2060 | ||
2061 | /***************************************************************************** | |
2062 | * | |
2063 | * Workqueue callbacks | |
2064 | * | |
2065 | *****************************************************************************/ | |
2066 | ||
2067 | static void iwl_bg_init_alive_start(struct work_struct *data) | |
2068 | { | |
2069 | struct iwl_priv *priv = | |
2070 | container_of(data, struct iwl_priv, init_alive_start.work); | |
2071 | ||
2072 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2073 | return; | |
2074 | ||
2075 | mutex_lock(&priv->mutex); | |
2076 | priv->cfg->ops->lib->init_alive_start(priv); | |
2077 | mutex_unlock(&priv->mutex); | |
2078 | } | |
2079 | ||
2080 | static void iwl_bg_alive_start(struct work_struct *data) | |
2081 | { | |
2082 | struct iwl_priv *priv = | |
2083 | container_of(data, struct iwl_priv, alive_start.work); | |
2084 | ||
2085 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2086 | return; | |
2087 | ||
2088 | /* enable dram interrupt */ | |
2089 | iwl_reset_ict(priv); | |
2090 | ||
2091 | mutex_lock(&priv->mutex); | |
2092 | iwl_alive_start(priv); | |
2093 | mutex_unlock(&priv->mutex); | |
2094 | } | |
2095 | ||
2096 | static void iwl_bg_run_time_calib_work(struct work_struct *work) | |
2097 | { | |
2098 | struct iwl_priv *priv = container_of(work, struct iwl_priv, | |
2099 | run_time_calib_work); | |
2100 | ||
2101 | mutex_lock(&priv->mutex); | |
2102 | ||
2103 | if (test_bit(STATUS_EXIT_PENDING, &priv->status) || | |
2104 | test_bit(STATUS_SCANNING, &priv->status)) { | |
2105 | mutex_unlock(&priv->mutex); | |
2106 | return; | |
2107 | } | |
2108 | ||
2109 | if (priv->start_calib) { | |
2110 | iwl_chain_noise_calibration(priv, &priv->statistics); | |
2111 | ||
2112 | iwl_sensitivity_calibration(priv, &priv->statistics); | |
2113 | } | |
2114 | ||
2115 | mutex_unlock(&priv->mutex); | |
2116 | return; | |
2117 | } | |
2118 | ||
2119 | static void iwl_bg_up(struct work_struct *data) | |
2120 | { | |
2121 | struct iwl_priv *priv = container_of(data, struct iwl_priv, up); | |
2122 | ||
2123 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2124 | return; | |
2125 | ||
2126 | mutex_lock(&priv->mutex); | |
2127 | __iwl_up(priv); | |
2128 | mutex_unlock(&priv->mutex); | |
2129 | } | |
2130 | ||
2131 | static void iwl_bg_restart(struct work_struct *data) | |
2132 | { | |
2133 | struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); | |
2134 | ||
2135 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2136 | return; | |
2137 | ||
2138 | if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { | |
2139 | mutex_lock(&priv->mutex); | |
2140 | priv->vif = NULL; | |
2141 | priv->is_open = 0; | |
2142 | mutex_unlock(&priv->mutex); | |
2143 | iwl_down(priv); | |
2144 | ieee80211_restart_hw(priv->hw); | |
2145 | } else { | |
2146 | iwl_down(priv); | |
2147 | queue_work(priv->workqueue, &priv->up); | |
2148 | } | |
2149 | } | |
2150 | ||
2151 | static void iwl_bg_rx_replenish(struct work_struct *data) | |
2152 | { | |
2153 | struct iwl_priv *priv = | |
2154 | container_of(data, struct iwl_priv, rx_replenish); | |
2155 | ||
2156 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2157 | return; | |
2158 | ||
2159 | mutex_lock(&priv->mutex); | |
2160 | iwl_rx_replenish(priv); | |
2161 | mutex_unlock(&priv->mutex); | |
2162 | } | |
2163 | ||
2164 | #define IWL_DELAY_NEXT_SCAN (HZ*2) | |
2165 | ||
2166 | void iwl_post_associate(struct iwl_priv *priv) | |
2167 | { | |
2168 | struct ieee80211_conf *conf = NULL; | |
2169 | int ret = 0; | |
2170 | unsigned long flags; | |
2171 | ||
2172 | if (priv->iw_mode == NL80211_IFTYPE_AP) { | |
2173 | IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); | |
2174 | return; | |
2175 | } | |
2176 | ||
2177 | IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", | |
2178 | priv->assoc_id, priv->active_rxon.bssid_addr); | |
2179 | ||
2180 | ||
2181 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2182 | return; | |
2183 | ||
2184 | ||
2185 | if (!priv->vif || !priv->is_open) | |
2186 | return; | |
2187 | ||
2188 | iwl_scan_cancel_timeout(priv, 200); | |
2189 | ||
2190 | conf = ieee80211_get_hw_conf(priv->hw); | |
2191 | ||
2192 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | |
2193 | iwlcore_commit_rxon(priv); | |
2194 | ||
2195 | iwl_setup_rxon_timing(priv); | |
2196 | ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, | |
2197 | sizeof(priv->rxon_timing), &priv->rxon_timing); | |
2198 | if (ret) | |
2199 | IWL_WARN(priv, "REPLY_RXON_TIMING failed - " | |
2200 | "Attempting to continue.\n"); | |
2201 | ||
2202 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | |
2203 | ||
2204 | iwl_set_rxon_ht(priv, &priv->current_ht_config); | |
2205 | ||
2206 | if (priv->cfg->ops->hcmd->set_rxon_chain) | |
2207 | priv->cfg->ops->hcmd->set_rxon_chain(priv); | |
2208 | ||
2209 | priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); | |
2210 | ||
2211 | IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", | |
2212 | priv->assoc_id, priv->beacon_int); | |
2213 | ||
2214 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) | |
2215 | priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; | |
2216 | else | |
2217 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; | |
2218 | ||
2219 | if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { | |
2220 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) | |
2221 | priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; | |
2222 | else | |
2223 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | |
2224 | ||
2225 | if (priv->iw_mode == NL80211_IFTYPE_ADHOC) | |
2226 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | |
2227 | ||
2228 | } | |
2229 | ||
2230 | iwlcore_commit_rxon(priv); | |
2231 | ||
2232 | switch (priv->iw_mode) { | |
2233 | case NL80211_IFTYPE_STATION: | |
2234 | break; | |
2235 | ||
2236 | case NL80211_IFTYPE_ADHOC: | |
2237 | ||
2238 | /* assume default assoc id */ | |
2239 | priv->assoc_id = 1; | |
2240 | ||
2241 | iwl_rxon_add_station(priv, priv->bssid, 0); | |
2242 | iwl_send_beacon_cmd(priv); | |
2243 | ||
2244 | break; | |
2245 | ||
2246 | default: | |
2247 | IWL_ERR(priv, "%s Should not be called in %d mode\n", | |
2248 | __func__, priv->iw_mode); | |
2249 | break; | |
2250 | } | |
2251 | ||
2252 | if (priv->iw_mode == NL80211_IFTYPE_ADHOC) | |
2253 | priv->assoc_station_added = 1; | |
2254 | ||
2255 | spin_lock_irqsave(&priv->lock, flags); | |
2256 | iwl_activate_qos(priv, 0); | |
2257 | spin_unlock_irqrestore(&priv->lock, flags); | |
2258 | ||
2259 | /* the chain noise calibration will enable PM upon completion | |
2260 | * If chain noise has already been run, then we need to enable | |
2261 | * power management here */ | |
2262 | if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) | |
2263 | iwl_power_update_mode(priv, false); | |
2264 | ||
2265 | /* Enable Rx differential gain and sensitivity calibrations */ | |
2266 | iwl_chain_noise_reset(priv); | |
2267 | priv->start_calib = 1; | |
2268 | ||
2269 | } | |
2270 | ||
2271 | /***************************************************************************** | |
2272 | * | |
2273 | * mac80211 entry point functions | |
2274 | * | |
2275 | *****************************************************************************/ | |
2276 | ||
2277 | #define UCODE_READY_TIMEOUT (4 * HZ) | |
2278 | ||
2279 | static int iwl_mac_start(struct ieee80211_hw *hw) | |
2280 | { | |
2281 | struct iwl_priv *priv = hw->priv; | |
2282 | int ret; | |
2283 | ||
2284 | IWL_DEBUG_MAC80211(priv, "enter\n"); | |
2285 | ||
2286 | /* we should be verifying the device is ready to be opened */ | |
2287 | mutex_lock(&priv->mutex); | |
2288 | ||
2289 | /* fetch ucode file from disk, alloc and copy to bus-master buffers ... | |
2290 | * ucode filename and max sizes are card-specific. */ | |
2291 | ||
2292 | if (!priv->ucode_code.len) { | |
2293 | ret = iwl_read_ucode(priv); | |
2294 | if (ret) { | |
2295 | IWL_ERR(priv, "Could not read microcode: %d\n", ret); | |
2296 | mutex_unlock(&priv->mutex); | |
2297 | return ret; | |
2298 | } | |
2299 | } | |
2300 | ||
2301 | ret = __iwl_up(priv); | |
2302 | ||
2303 | mutex_unlock(&priv->mutex); | |
2304 | ||
2305 | if (ret) | |
2306 | return ret; | |
2307 | ||
2308 | if (iwl_is_rfkill(priv)) | |
2309 | goto out; | |
2310 | ||
2311 | IWL_DEBUG_INFO(priv, "Start UP work done.\n"); | |
2312 | ||
2313 | /* Wait for START_ALIVE from the runtime uCode; otherwise callbacks from | |
2314 | * mac80211 will not run successfully. */ | |
2315 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | |
2316 | test_bit(STATUS_READY, &priv->status), | |
2317 | UCODE_READY_TIMEOUT); | |
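/* wait_event_interruptible_timeout() returns 0 if the timeout elapsed
 * without STATUS_READY being set, a positive value if it was set in
 * time, or a negative error if the wait was interrupted. */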
2318 | if (!ret) { | |
2319 | if (!test_bit(STATUS_READY, &priv->status)) { | |
2320 | IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", | |
2321 | jiffies_to_msecs(UCODE_READY_TIMEOUT)); | |
2322 | return -ETIMEDOUT; | |
2323 | } | |
2324 | } | |
2325 | ||
2326 | out: | |
2327 | priv->is_open = 1; | |
2328 | IWL_DEBUG_MAC80211(priv, "leave\n"); | |
2329 | return 0; | |
2330 | } | |
2331 | ||
2332 | static void iwl_mac_stop(struct ieee80211_hw *hw) | |
2333 | { | |
2334 | struct iwl_priv *priv = hw->priv; | |
2335 | ||
2336 | IWL_DEBUG_MAC80211(priv, "enter\n"); | |
2337 | ||
2338 | if (!priv->is_open) | |
2339 | return; | |
2340 | ||
2341 | priv->is_open = 0; | |
2342 | ||
2343 | if (iwl_is_ready_rf(priv) || test_bit(STATUS_SCAN_HW, &priv->status)) { | |
2344 | /* stop mac, cancel any scan request and clear | |
2345 | * RXON_FILTER_ASSOC_MSK BIT | |
2346 | */ | |
2347 | mutex_lock(&priv->mutex); | |
2348 | iwl_scan_cancel_timeout(priv, 100); | |
2349 | mutex_unlock(&priv->mutex); | |
2350 | } | |
2351 | ||
2352 | iwl_down(priv); | |
2353 | ||
2354 | flush_workqueue(priv->workqueue); | |
2355 | ||
2356 | /* enable interrupts again in order to receive rfkill changes */ | |
2357 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | |
2358 | iwl_enable_interrupts(priv); | |
2359 | ||
2360 | IWL_DEBUG_MAC80211(priv, "leave\n"); | |
2361 | } | |
2362 | ||
2363 | static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |
2364 | { | |
2365 | struct iwl_priv *priv = hw->priv; | |
2366 | ||
2367 | IWL_DEBUG_MACDUMP(priv, "enter\n"); | |
2368 | ||
2369 | IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, | |
2370 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); | |
2371 | ||
2372 | if (iwl_tx_skb(priv, skb)) | |
2373 | dev_kfree_skb_any(skb); | |
2374 | ||
2375 | IWL_DEBUG_MACDUMP(priv, "leave\n"); | |
2376 | return NETDEV_TX_OK; | |
2377 | } | |
2378 | ||
2379 | void iwl_config_ap(struct iwl_priv *priv) | |
2380 | { | |
2381 | int ret = 0; | |
2382 | unsigned long flags; | |
2383 | ||
2384 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2385 | return; | |
2386 | ||
2387 | /* The following should be done only at AP bring up */ | |
2388 | if (!iwl_is_associated(priv)) { | |
2389 | ||
2390 | /* RXON - unassoc (to set timing command) */ | |
2391 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | |
2392 | iwlcore_commit_rxon(priv); | |
2393 | ||
2394 | /* RXON Timing */ | |
2395 | iwl_setup_rxon_timing(priv); | |
2396 | ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, | |
2397 | sizeof(priv->rxon_timing), &priv->rxon_timing); | |
2398 | if (ret) | |
2399 | IWL_WARN(priv, "REPLY_RXON_TIMING failed - " | |
2400 | "Attempting to continue.\n"); | |
2401 | ||
2402 | if (priv->cfg->ops->hcmd->set_rxon_chain) | |
2403 | priv->cfg->ops->hcmd->set_rxon_chain(priv); | |
2404 | ||
2405 | /* FIXME: what should be the assoc_id for AP? */ | |
2406 | priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); | |
2407 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) | |
2408 | priv->staging_rxon.flags |= | |
2409 | RXON_FLG_SHORT_PREAMBLE_MSK; | |
2410 | else | |
2411 | priv->staging_rxon.flags &= | |
2412 | ~RXON_FLG_SHORT_PREAMBLE_MSK; | |
2413 | ||
2414 | if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { | |
2415 | if (priv->assoc_capability & | |
2416 | WLAN_CAPABILITY_SHORT_SLOT_TIME) | |
2417 | priv->staging_rxon.flags |= | |
2418 | RXON_FLG_SHORT_SLOT_MSK; | |
2419 | else | |
2420 | priv->staging_rxon.flags &= | |
2421 | ~RXON_FLG_SHORT_SLOT_MSK; | |
2422 | ||
2423 | if (priv->iw_mode == NL80211_IFTYPE_ADHOC) | |
2424 | priv->staging_rxon.flags &= | |
2425 | ~RXON_FLG_SHORT_SLOT_MSK; | |
2426 | } | |
2427 | /* restore RXON assoc */ | |
2428 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | |
2429 | iwlcore_commit_rxon(priv); | |
2430 | spin_lock_irqsave(&priv->lock, flags); | |
2431 | iwl_activate_qos(priv, 1); | |
2432 | spin_unlock_irqrestore(&priv->lock, flags); | |
2433 | iwl_rxon_add_station(priv, iwl_bcast_addr, 0); | |
2434 | } | |
2435 | iwl_send_beacon_cmd(priv); | |
2436 | ||
2437 | /* FIXME - we need to add code here to detect a totally new | |
2438 | * configuration, reset the AP, unassoc, rxon timing, assoc, | |
2439 | * clear sta table, add BCAST sta... */ | |
2440 | } | |
2441 | ||
2442 | static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw, | |
2443 | struct ieee80211_key_conf *keyconf, const u8 *addr, | |
2444 | u32 iv32, u16 *phase1key) | |
2445 | { | |
2446 | ||
2447 | struct iwl_priv *priv = hw->priv; | |
2448 | IWL_DEBUG_MAC80211(priv, "enter\n"); | |
2449 | ||
2450 | iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key); | |
2451 | ||
2452 | IWL_DEBUG_MAC80211(priv, "leave\n"); | |
2453 | } | |
2454 | ||
2455 | static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |
2456 | struct ieee80211_vif *vif, | |
2457 | struct ieee80211_sta *sta, | |
2458 | struct ieee80211_key_conf *key) | |
2459 | { | |
2460 | struct iwl_priv *priv = hw->priv; | |
2461 | const u8 *addr; | |
2462 | int ret; | |
2463 | u8 sta_id; | |
2464 | bool is_default_wep_key = false; | |
2465 | ||
2466 | IWL_DEBUG_MAC80211(priv, "enter\n"); | |
2467 | ||
2468 | if (priv->cfg->mod_params->sw_crypto) { | |
2469 | IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); | |
2470 | return -EOPNOTSUPP; | |
2471 | } | |
2472 | addr = sta ? sta->addr : iwl_bcast_addr; | |
2473 | sta_id = iwl_find_station(priv, addr); | |
2474 | if (sta_id == IWL_INVALID_STATION) { | |
2475 | IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", | |
2476 | addr); | |
2477 | return -EINVAL; | |
2478 | ||
2479 | } | |
2480 | ||
2481 | mutex_lock(&priv->mutex); | |
2482 | iwl_scan_cancel_timeout(priv, 100); | |
2483 | mutex_unlock(&priv->mutex); | |
2484 | ||
2485 | /* If we are getting a WEP group key and we have not received any key- | |
2486 | * mapping key so far, we are in legacy WEP mode (group key only); | |
2487 | * otherwise we are in 1X mode. | |
2488 | * In legacy WEP mode we use a different host command to the uCode. */ | |
2489 | if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && | |
2490 | priv->iw_mode != NL80211_IFTYPE_AP) { | |
2491 | if (cmd == SET_KEY) | |
2492 | is_default_wep_key = !priv->key_mapping_key; | |
2493 | else | |
2494 | is_default_wep_key = | |
2495 | (key->hw_key_idx == HW_KEY_DEFAULT); | |
2496 | } | |
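/* Summary of the default-WEP-key decision above:
 *   SET_KEY:     treat as default WEP key iff no key-mapping key has
 *                been installed yet;
 *   DISABLE_KEY: treat as default WEP key iff it was installed as one
 *                (hw_key_idx == HW_KEY_DEFAULT). */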
2497 | ||
2498 | switch (cmd) { | |
2499 | case SET_KEY: | |
2500 | if (is_default_wep_key) | |
2501 | ret = iwl_set_default_wep_key(priv, key); | |
2502 | else | |
2503 | ret = iwl_set_dynamic_key(priv, key, sta_id); | |
2504 | ||
2505 | IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); | |
2506 | break; | |
2507 | case DISABLE_KEY: | |
2508 | if (is_default_wep_key) | |
2509 | ret = iwl_remove_default_wep_key(priv, key); | |
2510 | else | |
2511 | ret = iwl_remove_dynamic_key(priv, key, sta_id); | |
2512 | ||
2513 | IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); | |
2514 | break; | |
2515 | default: | |
2516 | ret = -EINVAL; | |
2517 | } | |
2518 | ||
2519 | IWL_DEBUG_MAC80211(priv, "leave\n"); | |
2520 | ||
2521 | return ret; | |
2522 | } | |
2523 | ||
2524 | static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, | |
2525 | enum ieee80211_ampdu_mlme_action action, | |
2526 | struct ieee80211_sta *sta, u16 tid, u16 *ssn) | |
2527 | { | |
2528 | struct iwl_priv *priv = hw->priv; | |
2529 | int ret; | |
2530 | ||
2531 | IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", | |
2532 | sta->addr, tid); | |
2533 | ||
2534 | if (!(priv->cfg->sku & IWL_SKU_N)) | |
2535 | return -EACCES; | |
2536 | ||
2537 | switch (action) { | |
2538 | case IEEE80211_AMPDU_RX_START: | |
2539 | IWL_DEBUG_HT(priv, "start Rx\n"); | |
2540 | return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); | |
2541 | case IEEE80211_AMPDU_RX_STOP: | |
2542 | IWL_DEBUG_HT(priv, "stop Rx\n"); | |
2543 | ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid); | |
2544 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2545 | return 0; | |
2546 | else | |
2547 | return ret; | |
2548 | case IEEE80211_AMPDU_TX_START: | |
2549 | IWL_DEBUG_HT(priv, "start Tx\n"); | |
2550 | return iwl_tx_agg_start(priv, sta->addr, tid, ssn); | |
2551 | case IEEE80211_AMPDU_TX_STOP: | |
2552 | IWL_DEBUG_HT(priv, "stop Tx\n"); | |
2553 | ret = iwl_tx_agg_stop(priv, sta->addr, tid); | |
2554 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | |
2555 | return 0; | |
2556 | else | |
2557 | return ret; | |
2558 | default: | |
2559 | IWL_DEBUG_HT(priv, "unknown\n"); | |
2560 | return -EINVAL; | |
2562 | } | |
2563 | return 0; | |
2564 | } | |
2565 | ||
2566 | static int iwl_mac_get_stats(struct ieee80211_hw *hw, | |
2567 | struct ieee80211_low_level_stats *stats) | |
2568 | { | |
2569 | struct iwl_priv *priv = hw->priv; | |
2570 | ||
2572 | IWL_DEBUG_MAC80211(priv, "enter\n"); | |
2573 | IWL_DEBUG_MAC80211(priv, "leave\n"); | |
2574 | ||
2575 | return 0; | |
2576 | } | |
2577 | ||
2578 | /***************************************************************************** | |
2579 | * | |
2580 | * sysfs attributes | |
2581 | * | |
2582 | *****************************************************************************/ | |
2583 | ||
2584 | #ifdef CONFIG_IWLWIFI_DEBUG | |
2585 | ||
2586 | /* | |
2587 | * The following adds a new attribute to the sysfs representation | |
2588 | * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) | |
2589 | * used for controlling the debug level. | |
2590 | * | |
2591 | * See the level definitions in iwl for details. | |
2592 | * | |
2593 | * The debug_level managed through sysfs below is a per-device debug | |
2594 | * level; if set, it is used instead of the global debug level. | |
2596 | */ | |
2597 | static ssize_t show_debug_level(struct device *d, | |
2598 | struct device_attribute *attr, char *buf) | |
2599 | { | |
2600 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2601 | return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); | |
2602 | } | |
2603 | static ssize_t store_debug_level(struct device *d, | |
2604 | struct device_attribute *attr, | |
2605 | const char *buf, size_t count) | |
2606 | { | |
2607 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2608 | unsigned long val; | |
2609 | int ret; | |
2610 | ||
2611 | ret = strict_strtoul(buf, 0, &val); | |
2612 | if (ret) | |
2613 | IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); | |
2614 | else { | |
2615 | priv->debug_level = val; | |
2616 | if (iwl_alloc_traffic_mem(priv)) | |
2617 | IWL_ERR(priv, | |
2618 | "Not enough memory to generate traffic log\n"); | |
2619 | } | |
2620 | return strnlen(buf, count); | |
2621 | } | |
2622 | ||
2623 | static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, | |
2624 | show_debug_level, store_debug_level); | |
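/*
 * Example usage from user space (the sysfs path depends on the interface
 * name, and the mask value here is only illustrative):
 *
 *	cat /sys/class/net/wlan0/device/debug_level
 *	echo 0x47 > /sys/class/net/wlan0/device/debug_level
 */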
2625 | ||
2626 | ||
2627 | #endif /* CONFIG_IWLWIFI_DEBUG */ | |
2628 | ||
2629 | ||
2630 | static ssize_t show_temperature(struct device *d, | |
2631 | struct device_attribute *attr, char *buf) | |
2632 | { | |
2633 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2634 | ||
2635 | if (!iwl_is_alive(priv)) | |
2636 | return -EAGAIN; | |
2637 | ||
2638 | return sprintf(buf, "%d\n", priv->temperature); | |
2639 | } | |
2640 | ||
2641 | static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); | |
2642 | ||
2643 | static ssize_t show_tx_power(struct device *d, | |
2644 | struct device_attribute *attr, char *buf) | |
2645 | { | |
2646 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2647 | ||
2648 | if (!iwl_is_ready_rf(priv)) | |
2649 | return sprintf(buf, "off\n"); | |
2650 | else | |
2651 | return sprintf(buf, "%d\n", priv->tx_power_user_lmt); | |
2652 | } | |
2653 | ||
2654 | static ssize_t store_tx_power(struct device *d, | |
2655 | struct device_attribute *attr, | |
2656 | const char *buf, size_t count) | |
2657 | { | |
2658 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2659 | unsigned long val; | |
2660 | int ret; | |
2661 | ||
2662 | ret = strict_strtoul(buf, 10, &val); | |
2663 | if (ret) | |
2664 | IWL_INFO(priv, "%s is not in decimal form.\n", buf); | |
2665 | else { | |
2666 | ret = iwl_set_tx_power(priv, val, false); | |
2667 | if (ret) | |
2668 | IWL_ERR(priv, "failed setting tx power (0x%d).\n", | |
2669 | ret); | |
2670 | else | |
2671 | ret = count; | |
2672 | } | |
2673 | return ret; | |
2674 | } | |
2675 | ||
2676 | static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); | |
2677 | ||
2678 | static ssize_t show_flags(struct device *d, | |
2679 | struct device_attribute *attr, char *buf) | |
2680 | { | |
2681 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2682 | ||
2683 | return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); | |
2684 | } | |
2685 | ||
2686 | static ssize_t store_flags(struct device *d, | |
2687 | struct device_attribute *attr, | |
2688 | const char *buf, size_t count) | |
2689 | { | |
2690 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2691 | unsigned long val; | |
2692 | u32 flags; | |
2693 | int ret = strict_strtoul(buf, 0, &val); | |
2694 | if (ret) | |
2695 | return ret; | |
2696 | flags = (u32)val; | |
2697 | ||
2698 | mutex_lock(&priv->mutex); | |
2699 | if (le32_to_cpu(priv->staging_rxon.flags) != flags) { | |
2700 | /* Cancel any currently running scans... */ | |
2701 | if (iwl_scan_cancel_timeout(priv, 100)) | |
2702 | IWL_WARN(priv, "Could not cancel scan.\n"); | |
2703 | else { | |
2704 | IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags); | |
2705 | priv->staging_rxon.flags = cpu_to_le32(flags); | |
2706 | iwlcore_commit_rxon(priv); | |
2707 | } | |
2708 | } | |
2709 | mutex_unlock(&priv->mutex); | |
2710 | ||
2711 | return count; | |
2712 | } | |
2713 | ||
2714 | static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); | |
2715 | ||
2716 | static ssize_t show_filter_flags(struct device *d, | |
2717 | struct device_attribute *attr, char *buf) | |
2718 | { | |
2719 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2720 | ||
2721 | return sprintf(buf, "0x%04X\n", | |
2722 | le32_to_cpu(priv->active_rxon.filter_flags)); | |
2723 | } | |
2724 | ||
2725 | static ssize_t store_filter_flags(struct device *d, | |
2726 | struct device_attribute *attr, | |
2727 | const char *buf, size_t count) | |
2728 | { | |
2729 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2730 | unsigned long val; | |
2731 | u32 filter_flags; | |
2732 | int ret = strict_strtoul(buf, 0, &val); | |
2733 | if (ret) | |
2734 | return ret; | |
2735 | filter_flags = (u32)val; | |
2736 | ||
2737 | mutex_lock(&priv->mutex); | |
2738 | if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { | |
2739 | /* Cancel any currently running scans... */ | |
2740 | if (iwl_scan_cancel_timeout(priv, 100)) | |
2741 | IWL_WARN(priv, "Could not cancel scan.\n"); | |
2742 | else { | |
2743 | IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " | |
2744 | "0x%04X\n", filter_flags); | |
2745 | priv->staging_rxon.filter_flags = | |
2746 | cpu_to_le32(filter_flags); | |
2747 | iwlcore_commit_rxon(priv); | |
2748 | } | |
2749 | } | |
2750 | mutex_unlock(&priv->mutex); | |
2751 | ||
2752 | return count; | |
2753 | } | |
2754 | ||
2755 | static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, | |
2756 | store_filter_flags); | |
2757 | ||
2758 | ||
2759 | static ssize_t show_statistics(struct device *d, | |
2760 | struct device_attribute *attr, char *buf) | |
2761 | { | |
2762 | struct iwl_priv *priv = dev_get_drvdata(d); | |
2763 | u32 size = sizeof(struct iwl_notif_statistics); | |
2764 | u32 len = 0, ofs = 0; | |
2765 | u8 *data = (u8 *)&priv->statistics; | |
2766 | int rc = 0; | |
2767 | ||
2768 | if (!iwl_is_alive(priv)) | |
2769 | return -EAGAIN; | |
2770 | ||
2771 | mutex_lock(&priv->mutex); | |
2772 | rc = iwl_send_statistics_request(priv, 0); | |
2773 | mutex_unlock(&priv->mutex); | |
2774 | ||
2775 | if (rc) { | |
2776 | len = sprintf(buf, | |
2777 | "Error sending statistics request: 0x%08X\n", rc); | |
2778 | return len; | |
2779 | } | |
2780 | ||
2781 | while (size && (PAGE_SIZE - len)) { | |
2782 | hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, | |
2783 | PAGE_SIZE - len, 1); | |
2784 | len = strlen(buf); | |
2785 | if (PAGE_SIZE - len) | |
2786 | buf[len++] = '\n'; | |
2787 | ||
2788 | ofs += 16; | |
2789 | size -= min(size, 16U); | |
2790 | } | |
2791 | ||
2792 | return len; | |
2793 | } | |
2794 | ||
2795 | static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); | |
2796 | ||
2797 | ||
2798 | /***************************************************************************** | |
2799 | * | |
2800 | * driver setup and teardown | |
2801 | * | |
2802 | *****************************************************************************/ | |
2803 | ||
2804 | static void iwl_setup_deferred_work(struct iwl_priv *priv) | |
2805 | { | |
2806 | priv->workqueue = create_singlethread_workqueue(DRV_NAME); | |
2807 | ||
2808 | init_waitqueue_head(&priv->wait_command_queue); | |
2809 | ||
2810 | INIT_WORK(&priv->up, iwl_bg_up); | |
2811 | INIT_WORK(&priv->restart, iwl_bg_restart); | |
2812 | INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); | |
2813 | INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); | |
2814 | INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); | |
2815 | INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); | |
2816 | INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); | |
2817 | ||
2818 | iwl_setup_scan_deferred_work(priv); | |
2819 | ||
2820 | if (priv->cfg->ops->lib->setup_deferred_work) | |
2821 | priv->cfg->ops->lib->setup_deferred_work(priv); | |
2822 | ||
2823 | init_timer(&priv->statistics_periodic); | |
2824 | priv->statistics_periodic.data = (unsigned long)priv; | |
2825 | priv->statistics_periodic.function = iwl_bg_statistics_periodic; | |
2826 | ||
2827 | if (!priv->cfg->use_isr_legacy) | |
2828 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | |
2829 | iwl_irq_tasklet, (unsigned long)priv); | |
2830 | else | |
2831 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | |
2832 | iwl_irq_tasklet_legacy, (unsigned long)priv); | |
2833 | } | |
2834 | ||
2835 | static void iwl_cancel_deferred_work(struct iwl_priv *priv) | |
2836 | { | |
2837 | if (priv->cfg->ops->lib->cancel_deferred_work) | |
2838 | priv->cfg->ops->lib->cancel_deferred_work(priv); | |
2839 | ||
2840 | cancel_delayed_work_sync(&priv->init_alive_start); | |
2841 | cancel_delayed_work(&priv->scan_check); | |
2842 | cancel_delayed_work(&priv->alive_start); | |
2843 | cancel_work_sync(&priv->beacon_update); | |
2844 | del_timer_sync(&priv->statistics_periodic); | |
2845 | } | |
2846 | ||
2847 | static struct attribute *iwl_sysfs_entries[] = { | |
2848 | &dev_attr_flags.attr, | |
2849 | &dev_attr_filter_flags.attr, | |
2850 | &dev_attr_statistics.attr, | |
2851 | &dev_attr_temperature.attr, | |
2852 | &dev_attr_tx_power.attr, | |
2853 | #ifdef CONFIG_IWLWIFI_DEBUG | |
2854 | &dev_attr_debug_level.attr, | |
2855 | #endif | |
2856 | NULL | |
2857 | }; | |
2858 | ||
2859 | static struct attribute_group iwl_attribute_group = { | |
2860 | .name = NULL, /* put in device directory */ | |
2861 | .attrs = iwl_sysfs_entries, | |
2862 | }; | |
2863 | ||
2864 | static struct ieee80211_ops iwl_hw_ops = { | |
2865 | .tx = iwl_mac_tx, | |
2866 | .start = iwl_mac_start, | |
2867 | .stop = iwl_mac_stop, | |
2868 | .add_interface = iwl_mac_add_interface, | |
2869 | .remove_interface = iwl_mac_remove_interface, | |
2870 | .config = iwl_mac_config, | |
2871 | .configure_filter = iwl_configure_filter, | |
2872 | .set_key = iwl_mac_set_key, | |
2873 | .update_tkip_key = iwl_mac_update_tkip_key, | |
2874 | .get_stats = iwl_mac_get_stats, | |
2875 | .get_tx_stats = iwl_mac_get_tx_stats, | |
2876 | .conf_tx = iwl_mac_conf_tx, | |
2877 | .reset_tsf = iwl_mac_reset_tsf, | |
2878 | .bss_info_changed = iwl_bss_info_changed, | |
2879 | .ampdu_action = iwl_mac_ampdu_action, | |
2880 | .hw_scan = iwl_mac_hw_scan | |
2881 | }; | |
2882 | ||
2883 | static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
2884 | { | |
2885 | int err = 0; | |
2886 | struct iwl_priv *priv; | |
2887 | struct ieee80211_hw *hw; | |
2888 | struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); | |
2889 | unsigned long flags; | |
2890 | u16 pci_cmd; | |
2891 | ||
2892 | /************************ | |
2893 | * 1. Allocating HW data | |
2894 | ************************/ | |
2895 | ||
2896 | /* Disabling hardware scan means that mac80211 will perform scans | |
2897 | * "the hard way", rather than using device's scan. */ | |
2898 | if (cfg->mod_params->disable_hw_scan) { | |
2899 | if (iwl_debug_level & IWL_DL_INFO) | |
2900 | dev_printk(KERN_DEBUG, &(pdev->dev), | |
2901 | "Disabling hw_scan\n"); | |
2902 | iwl_hw_ops.hw_scan = NULL; | |
2903 | } | |
2904 | ||
2905 | hw = iwl_alloc_all(cfg, &iwl_hw_ops); | |
2906 | if (!hw) { | |
2907 | err = -ENOMEM; | |
2908 | goto out; | |
2909 | } | |
2910 | priv = hw->priv; | |
2911 | /* At this point both hw and priv are allocated. */ | |
2912 | ||
2913 | SET_IEEE80211_DEV(hw, &pdev->dev); | |
2914 | ||
2915 | IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); | |
2916 | priv->cfg = cfg; | |
2917 | priv->pci_dev = pdev; | |
2918 | priv->inta_mask = CSR_INI_SET_MASK; | |
2919 | ||
2920 | #ifdef CONFIG_IWLWIFI_DEBUG | |
2921 | atomic_set(&priv->restrict_refcnt, 0); | |
2922 | #endif | |
2923 | if (iwl_alloc_traffic_mem(priv)) | |
2924 | IWL_ERR(priv, "Not enough memory to generate traffic log\n"); | |
2925 | ||
2926 | /************************** | |
2927 | * 2. Initializing PCI bus | |
2928 | **************************/ | |
2929 | if (pci_enable_device(pdev)) { | |
2930 | err = -ENODEV; | |
2931 | goto out_ieee80211_free_hw; | |
2932 | } | |
2933 | ||
2934 | pci_set_master(pdev); | |
2935 | ||
2936 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | |
2937 | if (!err) | |
2938 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); | |
2939 | if (err) { | |
2940 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2941 | if (!err) | |
2942 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | |
2943 | /* both attempts failed: */ | |
2944 | if (err) { | |
2945 | IWL_WARN(priv, "No suitable DMA available.\n"); | |
2946 | goto out_pci_disable_device; | |
2947 | } | |
2948 | } | |
2949 | ||
2950 | err = pci_request_regions(pdev, DRV_NAME); | |
2951 | if (err) | |
2952 | goto out_pci_disable_device; | |
2953 | ||
2954 | pci_set_drvdata(pdev, priv); | |
2955 | ||
2956 | ||
2957 | /*********************** | |
2958 | * 3. Read REV register | |
2959 | ***********************/ | |
2960 | priv->hw_base = pci_iomap(pdev, 0, 0); | |
2961 | if (!priv->hw_base) { | |
2962 | err = -ENODEV; | |
2963 | goto out_pci_release_regions; | |
2964 | } | |
2965 | ||
2966 | IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", | |
2967 | (unsigned long long) pci_resource_len(pdev, 0)); | |
2968 | IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); | |
2969 | ||
2970 | /* This spin lock will be used in apm_ops.init and EEPROM access, | |
2971 | * so initialize it now. | |
2972 | */ | |
2973 | spin_lock_init(&priv->reg_lock); | |
2974 | iwl_hw_detect(priv); | |
2975 | IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", | |
2976 | priv->cfg->name, priv->hw_rev); | |
2977 | ||
2978 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | |
2979 | * PCI Tx retries from interfering with C3 CPU state */ | |
2980 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | |
2981 | ||
2982 | iwl_prepare_card_hw(priv); | |
2983 | if (!priv->hw_ready) { | |
2984 | IWL_WARN(priv, "Failed, HW not ready\n"); | |
2985 | goto out_iounmap; | |
2986 | } | |
2987 | ||
2988 | /* amp init */ | |
2989 | err = priv->cfg->ops->lib->apm_ops.init(priv); | |
2990 | if (err < 0) { | |
2991 | IWL_ERR(priv, "Failed to init APMG\n"); | |
2992 | goto out_iounmap; | |
2993 | } | |
2994 | /***************** | |
2995 | * 4. Read EEPROM | |
2996 | *****************/ | |
2997 | /* Read the EEPROM */ | |
2998 | err = iwl_eeprom_init(priv); | |
2999 | if (err) { | |
3000 | IWL_ERR(priv, "Unable to init EEPROM\n"); | |
3001 | goto out_iounmap; | |
3002 | } | |
3003 | err = iwl_eeprom_check_version(priv); | |
3004 | if (err) | |
3005 | goto out_free_eeprom; | |
3006 | ||
3007 | /* extract MAC Address */ | |
3008 | iwl_eeprom_get_mac(priv, priv->mac_addr); | |
3009 | IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->mac_addr); | |
3010 | SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); | |
3011 | ||
3012 | /************************ | |
3013 | * 5. Setup HW constants | |
3014 | ************************/ | |
3015 | if (iwl_set_hw_params(priv)) { | |
3016 | IWL_ERR(priv, "failed to set hw parameters\n"); | |
3017 | goto out_free_eeprom; | |
3018 | } | |
3019 | ||
3020 | /******************* | |
3021 | * 6. Setup priv | |
3022 | *******************/ | |
3023 | ||
3024 | err = iwl_init_drv(priv); | |
3025 | if (err) | |
3026 | goto out_free_eeprom; | |
3027 | /* At this point both hw and priv are initialized. */ | |
3028 | ||
3029 | /******************** | |
3030 | * 7. Setup services | |
3031 | ********************/ | |
3032 | spin_lock_irqsave(&priv->lock, flags); | |
3033 | iwl_disable_interrupts(priv); | |
3034 | spin_unlock_irqrestore(&priv->lock, flags); | |
3035 | ||
3036 | pci_enable_msi(priv->pci_dev); | |
3037 | ||
3038 | iwl_alloc_isr_ict(priv); | |
3039 | err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, | |
3040 | IRQF_SHARED, DRV_NAME, priv); | |
3041 | if (err) { | |
3042 | IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); | |
3043 | goto out_disable_msi; | |
3044 | } | |
3045 | err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); | |
3046 | if (err) { | |
3047 | IWL_ERR(priv, "failed to create sysfs device attributes\n"); | |
3048 | goto out_free_irq; | |
3049 | } | |
3050 | ||
3051 | iwl_setup_deferred_work(priv); | |
3052 | iwl_setup_rx_handlers(priv); | |
3053 | ||
3054 | /********************************** | |
3055 | * 8. Setup and register mac80211 | |
3056 | **********************************/ | |
3057 | ||
3058 | /* enable interrupts if needed: hw bug w/a */ | |
3059 | pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); | |
3060 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { | |
3061 | pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; | |
3062 | pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); | |
3063 | } | |
3064 | ||
3065 | iwl_enable_interrupts(priv); | |
3066 | ||
3067 | err = iwl_setup_mac(priv); | |
3068 | if (err) | |
3069 | goto out_remove_sysfs; | |
3070 | ||
3071 | err = iwl_dbgfs_register(priv, DRV_NAME); | |
3072 | if (err) | |
3073 | IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); | |
3074 | ||
3075 | /* If platform's RF_KILL switch is NOT set to KILL */ | |
3076 | if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) | |
3077 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | |
3078 | else | |
3079 | set_bit(STATUS_RF_KILL_HW, &priv->status); | |
3080 | ||
3081 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | |
3082 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | |
3083 | ||
3084 | iwl_power_initialize(priv); | |
3085 | iwl_tt_initialize(priv); | |
3086 | return 0; | |
3087 | ||
3088 | out_remove_sysfs: | |
3089 | destroy_workqueue(priv->workqueue); | |
3090 | priv->workqueue = NULL; | |
3091 | sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); | |
3092 | out_free_irq: | |
3093 | free_irq(priv->pci_dev->irq, priv); | |
3094 | iwl_free_isr_ict(priv); | |
3095 | out_disable_msi: | |
3096 | pci_disable_msi(priv->pci_dev); | |
3097 | iwl_uninit_drv(priv); | |
3098 | out_free_eeprom: | |
3099 | iwl_eeprom_free(priv); | |
3100 | out_iounmap: | |
3101 | pci_iounmap(pdev, priv->hw_base); | |
3102 | out_pci_release_regions: | |
3103 | pci_set_drvdata(pdev, NULL); | |
3104 | pci_release_regions(pdev); | |
3105 | out_pci_disable_device: | |
3106 | pci_disable_device(pdev); | |
3107 | out_ieee80211_free_hw: | |
3108 | ieee80211_free_hw(priv->hw); | |
3109 | iwl_free_traffic_mem(priv); | |
3110 | out: | |
3111 | return err; | |
3112 | } | |
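/* The error labels above unwind the probe steps in reverse, each goto target
 * releasing only what had been set up successfully before the failure. */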
3113 | ||
3114 | static void __devexit iwl_pci_remove(struct pci_dev *pdev) | |
3115 | { | |
3116 | struct iwl_priv *priv = pci_get_drvdata(pdev); | |
3117 | unsigned long flags; | |
3118 | ||
3119 | if (!priv) | |
3120 | return; | |
3121 | ||
3122 | IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); | |
3123 | ||
3124 | iwl_dbgfs_unregister(priv); | |
3125 | sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); | |
3126 | ||
3127 | /* The ieee80211_unregister_hw() call will cause iwl_mac_stop() and | |
3128 | * hence iwl_down() to be called; since we are removing the device we | |
3129 | * need to set the STATUS_EXIT_PENDING bit first. | |
3130 | */ | |
3131 | set_bit(STATUS_EXIT_PENDING, &priv->status); | |
3132 | if (priv->mac80211_registered) { | |
3133 | ieee80211_unregister_hw(priv->hw); | |
3134 | priv->mac80211_registered = 0; | |
3135 | } else { | |
3136 | iwl_down(priv); | |
3137 | } | |
3138 | ||
3139 | iwl_tt_exit(priv); | |
3140 | ||
3141 | /* make sure we flush any pending irq or | |
3142 | * tasklet for the driver | |
3143 | */ | |
3144 | spin_lock_irqsave(&priv->lock, flags); | |
3145 | iwl_disable_interrupts(priv); | |
3146 | spin_unlock_irqrestore(&priv->lock, flags); | |
3147 | ||
3148 | iwl_synchronize_irq(priv); | |
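/* iwl_synchronize_irq() waits for any in-flight interrupt handling (ISR and
 * tasklet) to finish before the queues and DMA buffers below are torn down. */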
3149 | ||
3150 | iwl_dealloc_ucode_pci(priv); | |
3151 | ||
3152 | if (priv->rxq.bd) | |
3153 | iwl_rx_queue_free(priv, &priv->rxq); | |
3154 | iwl_hw_txq_ctx_free(priv); | |
3155 | ||
3156 | iwl_clear_stations_table(priv); | |
3157 | iwl_eeprom_free(priv); | |
3158 | ||
3159 | ||
3160 | /*netif_stop_queue(dev); */ | |
3161 | flush_workqueue(priv->workqueue); | |
3162 | ||
3163 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes | |
3164 | * priv->workqueue... so we can't take down the workqueue | |
3165 | * until now... */ | |
3166 | destroy_workqueue(priv->workqueue); | |
3167 | priv->workqueue = NULL; | |
3168 | iwl_free_traffic_mem(priv); | |
3169 | ||
3170 | free_irq(priv->pci_dev->irq, priv); | |
3171 | pci_disable_msi(priv->pci_dev); | |
3172 | pci_iounmap(pdev, priv->hw_base); | |
3173 | pci_release_regions(pdev); | |
3174 | pci_disable_device(pdev); | |
3175 | pci_set_drvdata(pdev, NULL); | |
3176 | ||
3177 | iwl_uninit_drv(priv); | |
3178 | ||
3179 | iwl_free_isr_ict(priv); | |
3180 | ||
3181 | if (priv->ibss_beacon) | |
3182 | dev_kfree_skb(priv->ibss_beacon); | |
3183 | ||
3184 | ieee80211_free_hw(priv->hw); | |
3185 | } | |
3186 | ||
3187 | ||
3188 | /***************************************************************************** | |
3189 | * | |
3190 | * driver and module entry point | |
3191 | * | |
3192 | *****************************************************************************/ | |
3193 | ||
3194 | /* The hardware-specific file defines the PCI ID table for that hardware module */ | |
3195 | static struct pci_device_id iwl_hw_card_ids[] = { | |
3196 | #ifdef CONFIG_IWL4965 | |
3197 | {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, | |
3198 | {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, | |
3199 | #endif /* CONFIG_IWL4965 */ | |
3200 | #ifdef CONFIG_IWL5000 | |
3201 | {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)}, | |
3202 | {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)}, | |
3203 | {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, | |
3204 | {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, | |
3205 | {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, | |
3206 | {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, | |
3207 | {IWL_PCI_DEVICE(0x4232, PCI_ANY_ID, iwl5100_agn_cfg)}, | |
3208 | {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)}, | |
3209 | {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)}, | |
3210 | {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)}, | |
3211 | /* 5350 WiFi/WiMax */ | |
3212 | {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, | |
3213 | {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, | |
3214 | {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, | |
3215 | /* 5150 WiFi/WiMax */ | |
3216 | {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)}, | |
3217 | {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)}, | |
3218 | /* 6000/6050 Series */ | |
3219 | {IWL_PCI_DEVICE(0x008D, PCI_ANY_ID, iwl6000h_2agn_cfg)}, | |
3220 | {IWL_PCI_DEVICE(0x008E, PCI_ANY_ID, iwl6000h_2agn_cfg)}, | |
3221 | {IWL_PCI_DEVICE(0x422B, PCI_ANY_ID, iwl6000_3agn_cfg)}, | |
3222 | {IWL_PCI_DEVICE(0x422C, PCI_ANY_ID, iwl6000i_2agn_cfg)}, | |
3223 | {IWL_PCI_DEVICE(0x4238, PCI_ANY_ID, iwl6000_3agn_cfg)}, | |
3224 | {IWL_PCI_DEVICE(0x4239, PCI_ANY_ID, iwl6000i_2agn_cfg)}, | |
3225 | {IWL_PCI_DEVICE(0x0086, PCI_ANY_ID, iwl6050_3agn_cfg)}, | |
3226 | {IWL_PCI_DEVICE(0x0087, PCI_ANY_ID, iwl6050_2agn_cfg)}, | |
3227 | {IWL_PCI_DEVICE(0x0088, PCI_ANY_ID, iwl6050_3agn_cfg)}, | |
3228 | {IWL_PCI_DEVICE(0x0089, PCI_ANY_ID, iwl6050_2agn_cfg)}, | |
3229 | /* 1000 Series WiFi */ | |
3230 | {IWL_PCI_DEVICE(0x0083, PCI_ANY_ID, iwl1000_bgn_cfg)}, | |
3231 | {IWL_PCI_DEVICE(0x0084, PCI_ANY_ID, iwl1000_bgn_cfg)}, | |
3232 | #endif /* CONFIG_IWL5000 */ | |
3233 | ||
3234 | {0} | |
3235 | }; | |
3236 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); | |
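/* MODULE_DEVICE_TABLE exports the ID list as module alias information so
 * udev/modprobe can autoload this driver when a matching PCI device shows up.
 *
 * For reference, IWL_PCI_DEVICE (defined in iwl-dev.h) expands roughly to a
 * struct pci_device_id initializer of the form:
 *
 *   .vendor = PCI_VENDOR_ID_INTEL, .device = (dev),
 *   .subvendor = PCI_ANY_ID,       .subdevice = (subdev),
 *   .driver_data = (kernel_ulong_t)&(cfg)
 *
 * i.e. each entry matches Intel's vendor ID plus a device/subdevice pair and
 * carries a pointer to the per-hardware iwl_cfg as driver_data. */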
3237 | ||
3238 | static struct pci_driver iwl_driver = { | |
3239 | .name = DRV_NAME, | |
3240 | .id_table = iwl_hw_card_ids, | |
3241 | .probe = iwl_pci_probe, | |
3242 | .remove = __devexit_p(iwl_pci_remove), | |
3243 | #ifdef CONFIG_PM | |
3244 | .suspend = iwl_pci_suspend, | |
3245 | .resume = iwl_pci_resume, | |
3246 | #endif | |
3247 | }; | |
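/* pci_register_driver() (called from iwl_init below) matches devices against
 * iwl_hw_card_ids and invokes .probe for each hit; the suspend/resume hooks
 * are compiled in only when CONFIG_PM is set. */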
3248 | ||
3249 | static int __init iwl_init(void) | |
3250 | { | |
3251 | ||
3252 | int ret; | |
3253 | printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); | |
3254 | printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); | |
3255 | ||
3256 | ret = iwlagn_rate_control_register(); | |
3257 | if (ret) { | |
3258 | printk(KERN_ERR DRV_NAME | |
3259 | "Unable to register rate control algorithm: %d\n", ret); | |
3260 | return ret; | |
3261 | } | |
3262 | ||
3263 | ret = pci_register_driver(&iwl_driver); | |
3264 | if (ret) { | |
3265 | printk(KERN_ERR DRV_NAME ": Unable to initialize PCI module\n"); | |
3266 | goto error_register; | |
3267 | } | |
3268 | ||
3269 | return ret; | |
3270 | ||
3271 | error_register: | |
3272 | iwlagn_rate_control_unregister(); | |
3273 | return ret; | |
3274 | } | |
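/* Registration order matters: the rate-control algorithm must be known to
 * mac80211 before any device probe can run, so it is registered first and
 * unregistered again if PCI driver registration fails. */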
3275 | ||
3276 | static void __exit iwl_exit(void) | |
3277 | { | |
3278 | pci_unregister_driver(&iwl_driver); | |
3279 | iwlagn_rate_control_unregister(); | |
3280 | } | |
3281 | ||
3282 | module_exit(iwl_exit); | |
3283 | module_init(iwl_init); | |
3284 | ||
3285 | #ifdef CONFIG_IWLWIFI_DEBUG | |
3286 | module_param_named(debug50, iwl_debug_level, uint, 0444); | |
3287 | MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)"); | |
3288 | module_param_named(debug, iwl_debug_level, uint, 0644); | |
3289 | MODULE_PARM_DESC(debug, "debug output mask"); | |
3290 | #endif | |
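/* Both parameters alias the same iwl_debug_level variable; "debug50" is kept
 * only as a deprecated, read-only alias of "debug". */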
3291 |