1 /******************************************************************************
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
21 * Contact Information:
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *****************************************************************************/
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/version.h>
30 #include <linux/init.h>
31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/delay.h>
34 #include <linux/skbuff.h>
35 #include <linux/netdevice.h>
36 #include <linux/wireless.h>
37 #include <net/mac80211.h>
38 #include <linux/etherdevice.h>
39 #include <asm/unaligned.h>
42 #include "iwl-helpers.h"
44 static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv);
46 #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
47 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
48 IWL_RATE_SISO_##s##M_PLCP, \
49 IWL_RATE_MIMO_##s##M_PLCP, \
50 IWL_RATE_##r##M_IEEE, \
51 IWL_RATE_##ip##M_INDEX, \
52 IWL_RATE_##in##M_INDEX, \
53 IWL_RATE_##rp##M_INDEX, \
54 IWL_RATE_##rn##M_INDEX, \
55 IWL_RATE_##pp##M_INDEX, \
56 IWL_RATE_##np##M_INDEX }
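/*
 * Illustrative expansion (not part of the original file): for the 6 Mbps
 * entry in the table below, IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11)
 * expands to
 *
 *	[IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP,
 *				IWL_RATE_SISO_6M_PLCP,
 *				IWL_RATE_MIMO_6M_PLCP,
 *				IWL_RATE_6M_IEEE,
 *				IWL_RATE_5M_INDEX, IWL_RATE_9M_INDEX,
 *				IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX,
 *				IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX }
 *
 * where the trailing index pairs are the previous/next rate links described
 * in the comment below.
 */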
60 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
62 * If there isn't a valid next or previous rate then INV is used which
63 * maps to IWL_RATE_INVALID
66 const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
67 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
68 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
69 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
70 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
71 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
72 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
73 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
74 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
75 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
76 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
77 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
78 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
79 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
82 #ifdef CONFIG_IWL4965_HT
84 static const u16 default_tid_to_tx_fifo[] = {
104 #endif /*CONFIG_IWL4965_HT */
106 static int is_fat_channel(__le32 rxon_flags)
108 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
109 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
112 static u8 is_single_stream(struct iwl4965_priv *priv)
114 #ifdef CONFIG_IWL4965_HT
115 if (!priv->current_ht_config.is_ht ||
116 (priv->current_ht_config.supp_mcs_set[1] == 0) ||
117 (priv->ps_mode == IWL_MIMO_PS_STATIC))
121 #endif /*CONFIG_IWL4965_HT */
125 int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
129 /* 4965 HT rate format */
130 if (rate_n_flags & RATE_MCS_HT_MSK) {
131 idx = (rate_n_flags & 0xff);
133 if (idx >= IWL_RATE_MIMO_6M_PLCP)
134 idx = idx - IWL_RATE_MIMO_6M_PLCP;
136 idx += IWL_FIRST_OFDM_RATE;
137 /* skip 9M, not supported in HT */
138 if (idx >= IWL_RATE_9M_INDEX)
140 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
143 /* 4965 legacy rate format, search for match in table */
145 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
146 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
154 * translate ucode response to mac80211 tx status control values
156 void iwl4965_hwrate_to_tx_control(struct iwl4965_priv *priv, u32 rate_n_flags,
157 struct ieee80211_tx_control *control)
161 control->antenna_sel_tx =
162 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_A_POS);
163 if (rate_n_flags & RATE_MCS_HT_MSK)
164 control->flags |= IEEE80211_TXCTL_OFDM_HT;
165 if (rate_n_flags & RATE_MCS_GF_MSK)
166 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
167 if (rate_n_flags & RATE_MCS_FAT_MSK)
168 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
169 if (rate_n_flags & RATE_MCS_DUP_MSK)
170 control->flags |= IEEE80211_TXCTL_DUP_DATA;
171 if (rate_n_flags & RATE_MCS_SGI_MSK)
172 control->flags |= IEEE80211_TXCTL_SHORT_GI;
173 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
174 * IEEE80211_BAND_2GHZ band as it contains all the rates */
175 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
176 if (rate_index == -1)
177 control->tx_rate = NULL;
180 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
184 * Determine how many receiver/antenna chains to use.
185 * More provides better reception via diversity. Fewer saves power.
186 * MIMO (dual stream) requires at least 2, but works better with 3.
187 * This does not determine *which* chains to use, just how many.
189 static int iwl4965_get_rx_chain_counter(struct iwl4965_priv *priv,
190 u8 *idle_state, u8 *rx_state)
192 u8 is_single = is_single_stream(priv);
193 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
195 /* # of Rx chains to use when expecting MIMO. */
196 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
201 /* # Rx chains when idling and maybe trying to save power */
202 switch (priv->ps_mode) {
203 case IWL_MIMO_PS_STATIC:
204 case IWL_MIMO_PS_DYNAMIC:
205 *idle_state = (is_cam) ? 2 : 1;
207 case IWL_MIMO_PS_NONE:
208 *idle_state = (is_cam) ? *rx_state : 1;
218 int iwl4965_hw_rxq_stop(struct iwl4965_priv *priv)
223 spin_lock_irqsave(&priv->lock, flags);
224 rc = iwl4965_grab_nic_access(priv);
226 spin_unlock_irqrestore(&priv->lock, flags);
231 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
232 rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
235 IWL_ERROR("Can't stop Rx DMA.\n");
237 iwl4965_release_nic_access(priv);
238 spin_unlock_irqrestore(&priv->lock, flags);
243 u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *addr)
247 int ret = IWL_INVALID_STATION;
249 DECLARE_MAC_BUF(mac);
251 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
252 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
255 if (is_broadcast_ether_addr(addr))
256 return IWL4965_BROADCAST_ID;
258 spin_lock_irqsave(&priv->sta_lock, flags);
259 for (i = start; i < priv->hw_setting.max_stations; i++)
260 if ((priv->stations[i].used) &&
262 (priv->stations[i].sta.sta.addr, addr))) {
267 IWL_DEBUG_ASSOC_LIMIT("cannot find STA %s, total %d\n",
268 print_mac(mac, addr), priv->num_stations);
271 spin_unlock_irqrestore(&priv->sta_lock, flags);
275 static int iwl4965_nic_set_pwr_src(struct iwl4965_priv *priv, int pwr_max)
280 spin_lock_irqsave(&priv->lock, flags);
281 ret = iwl4965_grab_nic_access(priv);
283 spin_unlock_irqrestore(&priv->lock, flags);
290 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
293 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
294 iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
295 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
296 ~APMG_PS_CTRL_MSK_PWR_SRC);
298 iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
299 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
300 ~APMG_PS_CTRL_MSK_PWR_SRC);
302 iwl4965_release_nic_access(priv);
303 spin_unlock_irqrestore(&priv->lock, flags);
308 static int iwl4965_rx_init(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
312 unsigned int rb_size;
314 spin_lock_irqsave(&priv->lock, flags);
315 rc = iwl4965_grab_nic_access(priv);
317 spin_unlock_irqrestore(&priv->lock, flags);
321 if (iwl4965_param_amsdu_size_8K)
322 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
324 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
327 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
329 /* Reset driver's Rx queue write index */
330 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
332 /* Tell device where to find RBD circular buffer in DRAM */
333 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
336 /* Tell device where in DRAM to update its Rx status */
337 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
338 (priv->hw_setting.shared_phys +
339 offsetof(struct iwl4965_shared, val0)) >> 4);
341 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
342 iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
343 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
344 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
347 (RX_QUEUE_SIZE_LOG <<
348 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
351 * iwl4965_write32(priv,CSR_INT_COAL_REG,0);
354 iwl4965_release_nic_access(priv);
355 spin_unlock_irqrestore(&priv->lock, flags);
360 /* Tell 4965 where to find the "keep warm" buffer */
361 static int iwl4965_kw_init(struct iwl4965_priv *priv)
366 spin_lock_irqsave(&priv->lock, flags);
367 rc = iwl4965_grab_nic_access(priv);
371 iwl4965_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
372 priv->kw.dma_addr >> 4);
373 iwl4965_release_nic_access(priv);
375 spin_unlock_irqrestore(&priv->lock, flags);
379 static int iwl4965_kw_alloc(struct iwl4965_priv *priv)
381 struct pci_dev *dev = priv->pci_dev;
382 struct iwl4965_kw *kw = &priv->kw;
384 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
385 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
392 #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
396 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv.
398 * Does not set up a command, or touch hardware.
400 int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv,
401 enum ieee80211_band band, u16 channel,
402 const struct iwl4965_eeprom_channel *eeprom_ch,
403 u8 fat_extension_channel)
405 struct iwl4965_channel_info *ch_info;
407 ch_info = (struct iwl4965_channel_info *)
408 iwl4965_get_channel_info(priv, band, channel);
410 if (!is_channel_valid(ch_info))
413 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
414 " %ddBm): Ad-Hoc %ssupported\n",
416 is_channel_a_band(ch_info) ?
418 CHECK_AND_PRINT(IBSS),
419 CHECK_AND_PRINT(ACTIVE),
420 CHECK_AND_PRINT(RADAR),
421 CHECK_AND_PRINT(WIDE),
422 CHECK_AND_PRINT(NARROW),
423 CHECK_AND_PRINT(DFS),
425 eeprom_ch->max_power_avg,
426 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
427 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
430 ch_info->fat_eeprom = *eeprom_ch;
431 ch_info->fat_max_power_avg = eeprom_ch->max_power_avg;
432 ch_info->fat_curr_txpow = eeprom_ch->max_power_avg;
433 ch_info->fat_min_power = 0;
434 ch_info->fat_scan_power = eeprom_ch->max_power_avg;
435 ch_info->fat_flags = eeprom_ch->flags;
436 ch_info->fat_extension_channel = fat_extension_channel;
442 * iwl4965_kw_free - Free the "keep warm" buffer
444 static void iwl4965_kw_free(struct iwl4965_priv *priv)
446 struct pci_dev *dev = priv->pci_dev;
447 struct iwl4965_kw *kw = &priv->kw;
450 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
451 memset(kw, 0, sizeof(*kw));
456 * iwl4965_txq_ctx_reset - Reset TX queue context
457 * Destroys all DMA structures and initializes them again
462 static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
465 int txq_id, slots_num;
468 iwl4965_kw_free(priv);
470 /* Free all tx/cmd queues and keep-warm buffer */
471 iwl4965_hw_txq_ctx_free(priv);
473 /* Alloc keep-warm buffer */
474 rc = iwl4965_kw_alloc(priv);
476 IWL_ERROR("Keep Warm allocation failed");
480 spin_lock_irqsave(&priv->lock, flags);
482 rc = iwl4965_grab_nic_access(priv);
484 IWL_ERROR("TX reset failed");
485 spin_unlock_irqrestore(&priv->lock, flags);
489 /* Turn off all Tx DMA channels */
490 iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0);
491 iwl4965_release_nic_access(priv);
492 spin_unlock_irqrestore(&priv->lock, flags);
494 /* Tell 4965 where to find the keep-warm buffer */
495 rc = iwl4965_kw_init(priv);
497 IWL_ERROR("kw_init failed\n");
501 /* Alloc and init all (default 16) Tx queues,
502 * including the command queue (#4) */
503 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
504 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
505 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
506 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
509 IWL_ERROR("Tx %d queue init failed\n", txq_id);
517 iwl4965_hw_txq_ctx_free(priv);
519 iwl4965_kw_free(priv);
524 int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
528 struct iwl4965_rx_queue *rxq = &priv->rxq;
533 iwl4965_power_init_handle(priv);
536 spin_lock_irqsave(&priv->lock, flags);
538 iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
539 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
541 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
542 rc = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
543 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
544 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
546 spin_unlock_irqrestore(&priv->lock, flags);
547 IWL_DEBUG_INFO("Failed to init the card\n");
551 rc = iwl4965_grab_nic_access(priv);
553 spin_unlock_irqrestore(&priv->lock, flags);
557 iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
559 iwl4965_write_prph(priv, APMG_CLK_CTRL_REG,
560 APMG_CLK_VAL_DMA_CLK_RQT |
561 APMG_CLK_VAL_BSM_CLK_RQT);
562 iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
566 iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
567 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
569 iwl4965_release_nic_access(priv);
570 iwl4965_write32(priv, CSR_INT_COALESCING, 512 / 32);
571 spin_unlock_irqrestore(&priv->lock, flags);
573 /* Determine HW type */
574 rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
578 IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);
580 iwl4965_nic_set_pwr_src(priv, 1);
581 spin_lock_irqsave(&priv->lock, flags);
583 if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
584 pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
585 /* Enable No Snoop field */
586 pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
590 spin_unlock_irqrestore(&priv->lock, flags);
592 if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
593 IWL_ERROR("Older EEPROM detected! Aborting.\n");
597 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);
599 /* disable L1 entry -- workaround for pre-B1 */
600 pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);
602 spin_lock_irqsave(&priv->lock, flags);
604 /* set CSR_HW_CONFIG_REG for uCode use */
606 iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG,
607 CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
608 CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
609 CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
611 rc = iwl4965_grab_nic_access(priv);
613 spin_unlock_irqrestore(&priv->lock, flags);
614 IWL_DEBUG_INFO("Failed to init the card\n");
618 iwl4965_read_prph(priv, APMG_PS_CTRL_REG);
619 iwl4965_set_bits_prph(priv, APMG_PS_CTRL_REG,
620 APMG_PS_CTRL_VAL_RESET_REQ);
622 iwl4965_clear_bits_prph(priv, APMG_PS_CTRL_REG,
623 APMG_PS_CTRL_VAL_RESET_REQ);
625 iwl4965_release_nic_access(priv);
626 spin_unlock_irqrestore(&priv->lock, flags);
628 iwl4965_hw_card_show_info(priv);
632 /* Allocate the RX queue, or reset if it is already allocated */
634 rc = iwl4965_rx_queue_alloc(priv);
636 IWL_ERROR("Unable to initialize Rx queue\n");
640 iwl4965_rx_queue_reset(priv, rxq);
642 iwl4965_rx_replenish(priv);
644 iwl4965_rx_init(priv, rxq);
646 spin_lock_irqsave(&priv->lock, flags);
648 rxq->need_update = 1;
649 iwl4965_rx_queue_update_write_ptr(priv, rxq);
651 spin_unlock_irqrestore(&priv->lock, flags);
653 /* Allocate and init all Tx and Command queues */
654 rc = iwl4965_txq_ctx_reset(priv);
658 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
659 IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");
661 if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
662 IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");
664 set_bit(STATUS_INIT, &priv->status);
669 int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv)
675 spin_lock_irqsave(&priv->lock, flags);
677 /* set stop master bit */
678 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
680 reg_val = iwl4965_read32(priv, CSR_GP_CNTRL);
682 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
683 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
684 IWL_DEBUG_INFO("Card in power save, master is already "
687 rc = iwl4965_poll_bit(priv, CSR_RESET,
688 CSR_RESET_REG_FLAG_MASTER_DISABLED,
689 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
691 spin_unlock_irqrestore(&priv->lock, flags);
696 spin_unlock_irqrestore(&priv->lock, flags);
697 IWL_DEBUG_INFO("stop master\n");
703 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
705 void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv)
711 /* Stop each Tx DMA channel, and wait for it to be idle */
712 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
713 spin_lock_irqsave(&priv->lock, flags);
714 if (iwl4965_grab_nic_access(priv)) {
715 spin_unlock_irqrestore(&priv->lock, flags);
719 iwl4965_write_direct32(priv,
720 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
722 iwl4965_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
723 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
725 iwl4965_release_nic_access(priv);
726 spin_unlock_irqrestore(&priv->lock, flags);
729 /* Deallocate memory for all Tx queues */
730 iwl4965_hw_txq_ctx_free(priv);
733 int iwl4965_hw_nic_reset(struct iwl4965_priv *priv)
738 iwl4965_hw_nic_stop_master(priv);
740 spin_lock_irqsave(&priv->lock, flags);
742 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
746 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
747 rc = iwl4965_poll_bit(priv, CSR_RESET,
748 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
749 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
753 rc = iwl4965_grab_nic_access(priv);
755 iwl4965_write_prph(priv, APMG_CLK_EN_REG,
756 APMG_CLK_VAL_DMA_CLK_RQT |
757 APMG_CLK_VAL_BSM_CLK_RQT);
761 iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
762 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
764 iwl4965_release_nic_access(priv);
767 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
768 wake_up_interruptible(&priv->wait_command_queue);
770 spin_unlock_irqrestore(&priv->lock, flags);
776 #define REG_RECALIB_PERIOD (60)
779 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
781 * This callback is provided in order to queue the statistics_work
782 * in work_queue context (vs. softirq)
784 * This timer function is continually reset to execute within
785 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
786 * was received. We need to ensure we receive the statistics in order
787 * to update the temperature used for calibrating the TXPOWER. However,
788 * we can't send the statistics command from softirq context (which
789 * is the context in which timers run) so we have to queue off the
790 * statistics_work to actually send the command to the hardware.
792 static void iwl4965_bg_statistics_periodic(unsigned long data)
794 struct iwl4965_priv *priv = (struct iwl4965_priv *)data;
796 queue_work(priv->workqueue, &priv->statistics_work);
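/*
 * Illustrative sketch (assumption, not code from this file): the
 * STATISTICS_NOTIFICATION handler elsewhere in the driver is expected to
 * re-arm this timer each time a notification arrives, roughly like
 *
 *	mod_timer(&priv->statistics_periodic,
 *		  jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
 *
 * so this callback only fires when notifications have stopped arriving for
 * REG_RECALIB_PERIOD seconds.  The field name "statistics_periodic" is
 * assumed here for illustration.
 */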
800 * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
802 * This is queued by iwl4965_bg_statistics_periodic.
804 static void iwl4965_bg_statistics_work(struct work_struct *work)
806 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
809 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
812 mutex_lock(&priv->mutex);
813 iwl4965_send_statistics_request(priv);
814 mutex_unlock(&priv->mutex);
817 #define CT_LIMIT_CONST 259
818 #define TM_CT_KILL_THRESHOLD 110
820 void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv)
822 struct iwl4965_ct_kill_config cmd;
825 u32 crit_temperature;
829 spin_lock_irqsave(&priv->lock, flags);
830 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
831 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
832 spin_unlock_irqrestore(&priv->lock, flags);
834 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
835 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
836 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
837 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
839 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
840 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
841 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
844 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
846 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
847 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
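/*
 * Worked example (hypothetical numbers, for illustration only): assuming
 * CELSIUS_TO_KELVIN(110) yields 383, and factory calibration values of
 * R1 = -20, R2 = 100, R3 = 390 (made up here), then
 *
 *	crit_temperature = (383 * (390 - (-20))) / 259 + 100
 *			 = 157030 / 259 + 100
 *			 = 606 + 100 = 706
 *
 * presumably expressed in the same raw sensor units as R2.
 */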
848 rc = iwl4965_send_cmd_pdu(priv,
849 REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd);
851 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
853 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
856 #ifdef CONFIG_IWL4965_SENSITIVITY
858 /* "false alarms" are signals that our DSP tries to lock onto,
859 * but then determines that they are either noise, or transmissions
860 * from a distant wireless network (also "noise", really) that get
861 * "stepped on" by stronger transmissions within our own network.
862 * This algorithm attempts to set a sensitivity level that is high
863 * enough to receive all of our own network traffic, but not so
864 * high that our DSP gets too busy trying to lock onto non-network
866 static int iwl4965_sens_energy_cck(struct iwl4965_priv *priv,
869 struct statistics_general_data *rx_info)
873 u8 max_silence_rssi = 0;
875 u8 silence_rssi_a = 0;
876 u8 silence_rssi_b = 0;
877 u8 silence_rssi_c = 0;
880 /* "false_alarms" values below are cross-multiplications to assess the
881 * numbers of false alarms within the measured period of actual Rx
882 * (Rx is off when we're txing), vs the min/max expected false alarms
883 * (some should be expected if rx is sensitive enough) in a
884 * hypothetical listening period of 200 time units (TU), 204.8 msec:
886 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
889 u32 false_alarms = norm_fa * 200 * 1024;
890 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
891 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
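	/*
	 * Worked example (illustrative): if norm_fa = 10 false alarms were
	 * counted while the receiver was enabled for rx_enable_time =
	 * 100000 usec, then false_alarms = 10 * 204800 = 2048000.  With a
	 * hypothetical MAX_FA_CCK of 50, max_false_alarms = 50 * 100000 =
	 * 5000000, so this rate of false alarms is still below the "too
	 * many" threshold.  The comparison is equivalent to
	 * (norm_fa / rx_enable_time) vs. (MAX_FA_CCK / 204800 usec), done by
	 * cross-multiplication to stay in integer math.
	 */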
892 struct iwl4965_sensitivity_data *data = NULL;
894 data = &(priv->sensitivity_data);
896 data->nrg_auto_corr_silence_diff = 0;
898 /* Find max silence rssi among all 3 receivers.
899 * This is background noise, which may include transmissions from other
900 * networks, measured during silence before our network's beacon */
901 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
902 ALL_BAND_FILTER) >> 8);
903 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
904 ALL_BAND_FILTER) >> 8);
905 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
906 ALL_BAND_FILTER) >> 8);
908 val = max(silence_rssi_b, silence_rssi_c);
909 max_silence_rssi = max(silence_rssi_a, (u8) val);
911 /* Store silence rssi in 20-beacon history table */
912 data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
913 data->nrg_silence_idx++;
914 if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
915 data->nrg_silence_idx = 0;
917 /* Find max silence rssi across 20 beacon history */
918 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
919 val = data->nrg_silence_rssi[i];
920 silence_ref = max(silence_ref, val);
922 IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
923 silence_rssi_a, silence_rssi_b, silence_rssi_c,
926 /* Find max rx energy (min value!) among all 3 receivers,
927 * measured during beacon frame.
928 * Save it in 10-beacon history table. */
929 i = data->nrg_energy_idx;
930 val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
931 data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
933 data->nrg_energy_idx++;
934 if (data->nrg_energy_idx >= 10)
935 data->nrg_energy_idx = 0;
937 /* Find min rx energy (max value) across 10 beacon history.
938 * This is the minimum signal level that we want to receive well.
939 * Add backoff (margin so we don't miss slightly lower energy frames).
940 * This establishes an upper bound (min value) for energy threshold. */
941 max_nrg_cck = data->nrg_value[0];
942 for (i = 1; i < 10; i++)
943 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
946 IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
947 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
948 rx_info->beacon_energy_c, max_nrg_cck - 6);
950 /* Count number of consecutive beacons with fewer-than-desired
952 if (false_alarms < min_false_alarms)
953 data->num_in_cck_no_fa++;
955 data->num_in_cck_no_fa = 0;
956 IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
957 data->num_in_cck_no_fa);
959 /* If we got too many false alarms this time, reduce sensitivity */
960 if (false_alarms > max_false_alarms) {
961 IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
962 false_alarms, max_false_alarms);
963 IWL_DEBUG_CALIB("... reducing sensitivity\n");
964 data->nrg_curr_state = IWL_FA_TOO_MANY;
966 if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
967 /* Store for "fewer than desired" on later beacon */
968 data->nrg_silence_ref = silence_ref;
970 /* increase energy threshold (reduce nrg value)
971 * to decrease sensitivity */
972 if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
973 data->nrg_th_cck = data->nrg_th_cck
977 /* increase auto_corr values to decrease sensitivity */
978 if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
979 data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
981 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
982 data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
984 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
985 data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);
987 /* Else if we got fewer than desired, increase sensitivity */
988 } else if (false_alarms < min_false_alarms) {
989 data->nrg_curr_state = IWL_FA_TOO_FEW;
991 /* Compare silence level with silence level for most recent
992 * healthy number or too many false alarms */
993 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
996 IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
997 false_alarms, min_false_alarms,
998 data->nrg_auto_corr_silence_diff);
1000 /* Increase value to increase sensitivity, but only if:
1001 * 1a) previous beacon did *not* have *too many* false alarms
1002 * 1b) AND there's a significant difference in Rx levels
1003 * from a previous beacon with too many, or healthy # FAs
1004 * OR 2) We've seen a lot of beacons (100) with too few
1006 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
1007 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
1008 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
1010 IWL_DEBUG_CALIB("... increasing sensitivity\n");
1011 /* Increase nrg value to increase sensitivity */
1012 val = data->nrg_th_cck + NRG_STEP_CCK;
1013 data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);
1015 /* Decrease auto_corr values to increase sensitivity */
1016 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
1017 data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);
1019 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
1020 data->auto_corr_cck_mrc =
1021 max((u32)AUTO_CORR_MIN_CCK_MRC, val);
1024 IWL_DEBUG_CALIB("... but not changing sensitivity\n");
1026 /* Else we got a healthy number of false alarms, keep status quo */
1028 IWL_DEBUG_CALIB(" FA in safe zone\n");
1029 data->nrg_curr_state = IWL_FA_GOOD_RANGE;
1031 /* Store for use in "fewer than desired" with later beacon */
1032 data->nrg_silence_ref = silence_ref;
1034 /* If previous beacon had too many false alarms,
1035 * give it some extra margin by reducing sensitivity again
1036 * (but don't go below measured energy of desired Rx) */
1037 if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
1038 IWL_DEBUG_CALIB("... increasing margin\n");
1039 data->nrg_th_cck -= NRG_MARGIN;
1043 /* Make sure the energy threshold does not go above the measured
1044 * energy of the desired Rx signals (reduced by backoff margin),
1045 * or else we might start missing Rx frames.
1046 * Lower value is higher energy, so we use max()!
1048 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
1049 IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
1051 data->nrg_prev_state = data->nrg_curr_state;
1057 static int iwl4965_sens_auto_corr_ofdm(struct iwl4965_priv *priv,
1062 u32 false_alarms = norm_fa * 200 * 1024;
1063 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1064 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
1065 struct iwl4965_sensitivity_data *data = NULL;
1067 data = &(priv->sensitivity_data);
1069 /* If we got too many false alarms this time, reduce sensitivity */
1070 if (false_alarms > max_false_alarms) {
1072 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1073 false_alarms, max_false_alarms);
1075 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1076 data->auto_corr_ofdm =
1077 min((u32)AUTO_CORR_MAX_OFDM, val);
1079 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1080 data->auto_corr_ofdm_mrc =
1081 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1083 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1084 data->auto_corr_ofdm_x1 =
1085 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1087 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1088 data->auto_corr_ofdm_mrc_x1 =
1089 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1092 /* Else if we got fewer than desired, increase sensitivity */
1093 else if (false_alarms < min_false_alarms) {
1095 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1096 false_alarms, min_false_alarms);
1098 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1099 data->auto_corr_ofdm =
1100 max((u32)AUTO_CORR_MIN_OFDM, val);
1102 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1103 data->auto_corr_ofdm_mrc =
1104 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1106 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1107 data->auto_corr_ofdm_x1 =
1108 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1110 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1111 data->auto_corr_ofdm_mrc_x1 =
1112 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1116 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1117 min_false_alarms, false_alarms, max_false_alarms);
1122 static int iwl4965_sensitivity_callback(struct iwl4965_priv *priv,
1123 struct iwl4965_cmd *cmd, struct sk_buff *skb)
1125 /* We didn't cache the SKB; let the caller free it */
1129 /* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
1130 static int iwl4965_sensitivity_write(struct iwl4965_priv *priv, u8 flags)
1133 struct iwl4965_sensitivity_cmd cmd ;
1134 struct iwl4965_sensitivity_data *data = NULL;
1135 struct iwl4965_host_cmd cmd_out = {
1136 .id = SENSITIVITY_CMD,
1137 .len = sizeof(struct iwl4965_sensitivity_cmd),
1138 .meta.flags = flags,
1142 data = &(priv->sensitivity_data);
1144 memset(&cmd, 0, sizeof(cmd));
1146 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1147 cpu_to_le16((u16)data->auto_corr_ofdm);
1148 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1149 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1150 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1151 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1152 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1153 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1155 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1156 cpu_to_le16((u16)data->auto_corr_cck);
1157 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1158 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1160 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1161 cpu_to_le16((u16)data->nrg_th_cck);
1162 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1163 cpu_to_le16((u16)data->nrg_th_ofdm);
1165 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1166 __constant_cpu_to_le16(190);
1167 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1168 __constant_cpu_to_le16(390);
1169 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1170 __constant_cpu_to_le16(62);
1172 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1173 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1174 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1177 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1178 data->auto_corr_cck, data->auto_corr_cck_mrc,
1181 /* Update uCode's "work" table, and copy it to DSP */
1182 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1184 if (flags & CMD_ASYNC)
1185 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
1187 /* Don't send command to uCode if nothing has changed */
1188 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1189 sizeof(u16)*HD_TABLE_SIZE)) {
1190 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1194 /* Copy table for comparison next time */
1195 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1196 sizeof(u16)*HD_TABLE_SIZE);
1198 rc = iwl4965_send_cmd(priv, &cmd_out);
1200 IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n");
1207 void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, u8 force)
1211 struct iwl4965_sensitivity_data *data = NULL;
1213 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1216 memset(&(priv->sensitivity_tbl[0]), 0,
1217 sizeof(u16)*HD_TABLE_SIZE);
1219 /* Clear driver's sensitivity algo data */
1220 data = &(priv->sensitivity_data);
1221 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
1223 data->num_in_cck_no_fa = 0;
1224 data->nrg_curr_state = IWL_FA_TOO_MANY;
1225 data->nrg_prev_state = IWL_FA_TOO_MANY;
1226 data->nrg_silence_ref = 0;
1227 data->nrg_silence_idx = 0;
1228 data->nrg_energy_idx = 0;
1230 for (i = 0; i < 10; i++)
1231 data->nrg_value[i] = 0;
1233 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1234 data->nrg_silence_rssi[i] = 0;
1236 data->auto_corr_ofdm = 90;
1237 data->auto_corr_ofdm_mrc = 170;
1238 data->auto_corr_ofdm_x1 = 105;
1239 data->auto_corr_ofdm_mrc_x1 = 220;
1240 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1241 data->auto_corr_cck_mrc = 200;
1242 data->nrg_th_cck = 100;
1243 data->nrg_th_ofdm = 100;
1245 data->last_bad_plcp_cnt_ofdm = 0;
1246 data->last_fa_cnt_ofdm = 0;
1247 data->last_bad_plcp_cnt_cck = 0;
1248 data->last_fa_cnt_cck = 0;
1250 /* Clear prior Sensitivity command data to force send to uCode */
1252 memset(&(priv->sensitivity_tbl[0]), 0,
1253 sizeof(u16)*HD_TABLE_SIZE);
1255 rc |= iwl4965_sensitivity_write(priv, flags);
1256 IWL_DEBUG_CALIB("<<return 0x%X\n", rc);
1262 /* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1263 * Called after every association, but this runs only once!
1264 * ... once chain noise is calibrated the first time, it's good forever. */
1265 void iwl4965_chain_noise_reset(struct iwl4965_priv *priv)
1267 struct iwl4965_chain_noise_data *data = NULL;
1270 data = &(priv->chain_noise_data);
1271 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl4965_is_associated(priv)) {
1272 struct iwl4965_calibration_cmd cmd;
1274 memset(&cmd, 0, sizeof(cmd));
1275 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1276 cmd.diff_gain_a = 0;
1277 cmd.diff_gain_b = 0;
1278 cmd.diff_gain_c = 0;
1279 rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1282 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1283 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1289 * Accumulate 20 beacons of signal and noise statistics for each of
1290 * 3 receivers/antennas/rx-chains, then figure out:
1291 * 1) Which antennas are connected.
1292 * 2) Differential rx gain settings to balance the 3 receivers.
1294 static void iwl4965_noise_calibration(struct iwl4965_priv *priv,
1295 struct iwl4965_notif_statistics *stat_resp)
1297 struct iwl4965_chain_noise_data *data = NULL;
1306 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1307 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1308 u32 max_average_sig;
1309 u16 max_average_sig_antenna_i;
1310 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1311 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1313 u16 chan_num = INITIALIZATION_VALUE;
1314 u32 band = INITIALIZATION_VALUE;
1315 u32 active_chains = 0;
1316 unsigned long flags;
1317 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1319 data = &(priv->chain_noise_data);
1321 /* Accumulate just the first 20 beacons after the first association,
1322 * then we're done forever. */
1323 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1324 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1325 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1329 spin_lock_irqsave(&priv->lock, flags);
1330 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1331 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1332 spin_unlock_irqrestore(&priv->lock, flags);
1336 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1337 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1339 /* Make sure we accumulate data for just the associated channel
1340 * (even if scanning). */
1341 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1342 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1343 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1344 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1346 spin_unlock_irqrestore(&priv->lock, flags);
1350 /* Accumulate beacon statistics values across 20 beacons */
1351 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1353 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1355 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1358 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1359 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1360 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1362 spin_unlock_irqrestore(&priv->lock, flags);
1364 data->beacon_count++;
1366 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1367 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1368 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1370 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1371 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1372 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1374 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1375 data->beacon_count);
1376 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1377 chain_sig_a, chain_sig_b, chain_sig_c);
1378 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1379 chain_noise_a, chain_noise_b, chain_noise_c);
1381 /* If this is the 20th beacon, determine:
1382 * 1) Disconnected antennas (using signal strengths)
1383 * 2) Differential gain (using silence noise) to balance receivers */
1384 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1386 /* Analyze signal for disconnected antenna */
1387 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1388 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1389 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1391 if (average_sig[0] >= average_sig[1]) {
1392 max_average_sig = average_sig[0];
1393 max_average_sig_antenna_i = 0;
1394 active_chains = (1 << max_average_sig_antenna_i);
1396 max_average_sig = average_sig[1];
1397 max_average_sig_antenna_i = 1;
1398 active_chains = (1 << max_average_sig_antenna_i);
1401 if (average_sig[2] >= max_average_sig) {
1402 max_average_sig = average_sig[2];
1403 max_average_sig_antenna_i = 2;
1404 active_chains = (1 << max_average_sig_antenna_i);
1407 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1408 average_sig[0], average_sig[1], average_sig[2]);
1409 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1410 max_average_sig, max_average_sig_antenna_i);
1412 /* Compare signal strengths for all 3 receivers. */
1413 for (i = 0; i < NUM_RX_CHAINS; i++) {
1414 if (i != max_average_sig_antenna_i) {
1415 s32 rssi_delta = (max_average_sig -
1418 /* If signal is very weak, compared with
1419 * strongest, mark it as disconnected. */
1420 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1421 data->disconn_array[i] = 1;
1423 active_chains |= (1 << i);
1424 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1425 "disconn_array[i] = %d\n",
1426 i, rssi_delta, data->disconn_array[i]);
1430 /*If both chains A & B are disconnected -
1431 * connect B and leave A as is */
1432 if (data->disconn_array[CHAIN_A] &&
1433 data->disconn_array[CHAIN_B]) {
1434 data->disconn_array[CHAIN_B] = 0;
1435 active_chains |= (1 << CHAIN_B);
1436 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1437 "W/A - declare B as connected\n");
1440 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1443 /* Save for use within RXON, TX, SCAN commands, etc. */
1444 priv->valid_antenna = active_chains;
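		/*
		 * Worked example (hypothetical numbers): if average_sig came
		 * out as {210, 200, 60} for chains A/B/C, chain A is the
		 * strongest.  Chain B's rssi_delta is 10 and chain C's is
		 * 150; with a hypothetical MAXIMUM_ALLOWED_PATHLOSS of 15,
		 * B stays connected but C is marked in disconn_array[],
		 * leaving active_chains = 0x3 (A and B), which is what gets
		 * saved in priv->valid_antenna above.
		 */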
1446 /* Analyze noise for rx balance */
1447 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1448 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1449 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1451 for (i = 0; i < NUM_RX_CHAINS; i++) {
1452 if (!(data->disconn_array[i]) &&
1453 (average_noise[i] <= min_average_noise)) {
1454 /* This means that chain i is active and has
1455 * lower noise values so far: */
1456 min_average_noise = average_noise[i];
1457 min_average_noise_antenna_i = i;
1461 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1463 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1464 average_noise[0], average_noise[1],
1467 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1468 min_average_noise, min_average_noise_antenna_i);
1470 for (i = 0; i < NUM_RX_CHAINS; i++) {
1473 if (!(data->disconn_array[i]) &&
1474 (data->delta_gain_code[i] ==
1475 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1476 delta_g = average_noise[i] - min_average_noise;
1477 data->delta_gain_code[i] = (u8)((delta_g *
1479 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
1480 data->delta_gain_code[i])
1481 data->delta_gain_code[i] =
1482 CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
1484 data->delta_gain_code[i] =
1485 (data->delta_gain_code[i] | (1 << 2));
1487 data->delta_gain_code[i] = 0;
1489 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1490 data->delta_gain_code[0],
1491 data->delta_gain_code[1],
1492 data->delta_gain_code[2]);
1494 /* Differential gain gets sent to uCode only once */
1495 if (!data->radio_write) {
1496 struct iwl4965_calibration_cmd cmd;
1497 data->radio_write = 1;
1499 memset(&cmd, 0, sizeof(cmd));
1500 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1501 cmd.diff_gain_a = data->delta_gain_code[0];
1502 cmd.diff_gain_b = data->delta_gain_code[1];
1503 cmd.diff_gain_c = data->delta_gain_code[2];
1504 rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1507 IWL_DEBUG_CALIB("fail sending cmd "
1508 "REPLY_PHY_CALIBRATION_CMD \n");
1510 /* TODO we might want to recalculate
1511 * rx_chain in rxon cmd */
1513 /* Mark so we run this algo only once! */
1514 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1516 data->chain_noise_a = 0;
1517 data->chain_noise_b = 0;
1518 data->chain_noise_c = 0;
1519 data->chain_signal_a = 0;
1520 data->chain_signal_b = 0;
1521 data->chain_signal_c = 0;
1522 data->beacon_count = 0;
1527 static void iwl4965_sensitivity_calibration(struct iwl4965_priv *priv,
1528 struct iwl4965_notif_statistics *resp)
1538 struct iwl4965_sensitivity_data *data = NULL;
1539 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1540 struct statistics_rx *statistics = &(resp->rx);
1541 unsigned long flags;
1542 struct statistics_general_data statis;
1544 data = &(priv->sensitivity_data);
1546 if (!iwl4965_is_associated(priv)) {
1547 IWL_DEBUG_CALIB("<< - not associated\n");
1551 spin_lock_irqsave(&priv->lock, flags);
1552 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1553 IWL_DEBUG_CALIB("<< invalid data.\n");
1554 spin_unlock_irqrestore(&priv->lock, flags);
1558 /* Extract Statistics: */
1559 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1560 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1561 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1562 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1563 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1565 statis.beacon_silence_rssi_a =
1566 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1567 statis.beacon_silence_rssi_b =
1568 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1569 statis.beacon_silence_rssi_c =
1570 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1571 statis.beacon_energy_a =
1572 le32_to_cpu(statistics->general.beacon_energy_a);
1573 statis.beacon_energy_b =
1574 le32_to_cpu(statistics->general.beacon_energy_b);
1575 statis.beacon_energy_c =
1576 le32_to_cpu(statistics->general.beacon_energy_c);
1578 spin_unlock_irqrestore(&priv->lock, flags);
1580 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1582 if (!rx_enable_time) {
1583 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1587 /* These statistics increase monotonically, and do not reset
1588 * at each beacon. Calculate difference from last value, or just
1589 * use the new statistics value if it has reset or wrapped around. */
1590 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1591 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1593 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1594 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1597 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
1598 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
1600 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
1601 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
1604 if (data->last_fa_cnt_ofdm > fa_ofdm)
1605 data->last_fa_cnt_ofdm = fa_ofdm;
1607 fa_ofdm -= data->last_fa_cnt_ofdm;
1608 data->last_fa_cnt_ofdm += fa_ofdm;
1611 if (data->last_fa_cnt_cck > fa_cck)
1612 data->last_fa_cnt_cck = fa_cck;
1614 fa_cck -= data->last_fa_cnt_cck;
1615 data->last_fa_cnt_cck += fa_cck;
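	/*
	 * Example of the delta calculation above (illustrative numbers): if
	 * the previously stored cumulative count was 100 and the new
	 * statistics report 130, this period's count becomes 30 and the
	 * stored value advances to 130.  If the new value is smaller than
	 * the stored one (uCode reset or counter wrap), the stored value is
	 * simply resynced to the new reading.
	 */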
1618 /* Total aborted signal locks */
1619 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1620 norm_fa_cck = fa_cck + bad_plcp_cck;
1622 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1623 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1625 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1626 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1627 rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC);
1632 static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1634 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
1637 mutex_lock(&priv->mutex);
1639 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1640 test_bit(STATUS_SCANNING, &priv->status)) {
1641 mutex_unlock(&priv->mutex);
1645 if (priv->start_calib) {
1646 iwl4965_noise_calibration(priv, &priv->statistics);
1648 if (priv->sensitivity_data.state ==
1649 IWL_SENS_CALIB_NEED_REINIT) {
1650 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1651 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1653 iwl4965_sensitivity_calibration(priv,
1657 mutex_unlock(&priv->mutex);
1660 #endif /*CONFIG_IWL4965_SENSITIVITY*/
1662 static void iwl4965_bg_txpower_work(struct work_struct *work)
1664 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
1667 /* If a scan happened to start before we got here
1668 * then just return; the statistics notification will
1669 * kick off another scheduled work to compensate for
1670 * any temperature delta we missed here. */
1671 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1672 test_bit(STATUS_SCANNING, &priv->status))
1675 mutex_lock(&priv->mutex);
1677 /* Regardless of whether we are associated, we must reconfigure the
1678 * TX power since frames can be sent on non-radar channels while
1680 iwl4965_hw_reg_send_txpower(priv);
1682 /* Update last_temperature to keep is_calib_needed from running
1683 * when it isn't needed... */
1684 priv->last_temperature = priv->temperature;
1686 mutex_unlock(&priv->mutex);
1690 * Acquire priv->lock before calling this function !
1692 static void iwl4965_set_wr_ptrs(struct iwl4965_priv *priv, int txq_id, u32 index)
1694 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
1695 (index & 0xff) | (txq_id << 8));
1696 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index);
1700 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
1701 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
1702 * @scd_retry: (1) Indicates queue will be used in aggregation mode
1704 * NOTE: Acquire priv->lock before calling this function !
1706 static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv,
1707 struct iwl4965_tx_queue *txq,
1708 int tx_fifo_id, int scd_retry)
1710 int txq_id = txq->q.id;
1712 /* Find out whether to activate Tx queue */
1713 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1715 /* Set up and activate */
1716 iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id),
1717 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1718 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1719 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
1720 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1721 SCD_QUEUE_STTS_REG_MSK);
1723 txq->sched_retry = scd_retry;
1725 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
1726 active ? "Activate" : "Deactivate",
1727 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1730 static const u16 default_queue_to_tx_fifo[] = {
1740 static inline void iwl4965_txq_ctx_activate(struct iwl4965_priv *priv, int txq_id)
1742 set_bit(txq_id, &priv->txq_ctx_active_msk);
1745 static inline void iwl4965_txq_ctx_deactivate(struct iwl4965_priv *priv, int txq_id)
1747 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1750 int iwl4965_alive_notify(struct iwl4965_priv *priv)
1754 unsigned long flags;
1757 spin_lock_irqsave(&priv->lock, flags);
1759 #ifdef CONFIG_IWL4965_SENSITIVITY
1760 memset(&(priv->sensitivity_data), 0,
1761 sizeof(struct iwl4965_sensitivity_data));
1762 memset(&(priv->chain_noise_data), 0,
1763 sizeof(struct iwl4965_chain_noise_data));
1764 for (i = 0; i < NUM_RX_CHAINS; i++)
1765 priv->chain_noise_data.delta_gain_code[i] =
1766 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1767 #endif /* CONFIG_IWL4965_SENSITIVITY*/
1768 rc = iwl4965_grab_nic_access(priv);
1770 spin_unlock_irqrestore(&priv->lock, flags);
1774 /* Clear 4965's internal Tx Scheduler database */
1775 priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR);
1776 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1777 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1778 iwl4965_write_targ_mem(priv, a, 0);
1779 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
1780 iwl4965_write_targ_mem(priv, a, 0);
1781 for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
1782 iwl4965_write_targ_mem(priv, a, 0);
1784 /* Tell 4965 where to find Tx byte count tables */
1785 iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR,
1786 (priv->hw_setting.shared_phys +
1787 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
1789 /* Disable chain mode for all queues */
1790 iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0);
1792 /* Initialize each Tx queue (including the command queue) */
1793 for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
1795 /* TFD circular buffer read/write indexes */
1796 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0);
1797 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1799 /* Max Tx Window size for Scheduler-ACK mode */
1800 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1801 SCD_CONTEXT_QUEUE_OFFSET(i),
1803 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1804 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1807 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
1808 SCD_CONTEXT_QUEUE_OFFSET(i) +
1811 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1812 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1815 iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK,
1816 (1 << priv->hw_setting.max_txq_num) - 1);
1818 /* Activate all Tx DMA/FIFO channels */
1819 iwl4965_write_prph(priv, KDR_SCD_TXFACT,
1820 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1822 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
1824 /* Map each Tx/cmd queue to its corresponding fifo */
1825 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
1826 int ac = default_queue_to_tx_fifo[i];
1827 iwl4965_txq_ctx_activate(priv, i);
1828 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1831 iwl4965_release_nic_access(priv);
1832 spin_unlock_irqrestore(&priv->lock, flags);
1838 * iwl4965_hw_set_hw_setting
1840 * Called when initializing driver
1842 int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv)
1844 /* Allocate area for Tx byte count tables and Rx queue status */
1845 priv->hw_setting.shared_virt =
1846 pci_alloc_consistent(priv->pci_dev,
1847 sizeof(struct iwl4965_shared),
1848 &priv->hw_setting.shared_phys);
1850 if (!priv->hw_setting.shared_virt)
1853 memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl4965_shared));
1855 priv->hw_setting.max_txq_num = iwl4965_param_queues_num;
1856 priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
1857 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
1858 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
1859 if (iwl4965_param_amsdu_size_8K)
1860 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_8K;
1862 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_4K;
1863 priv->hw_setting.max_pkt_size = priv->hw_setting.rx_buf_size - 256;
1864 priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
1865 priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
1867 priv->hw_setting.tx_ant_num = 2;
1873 * iwl4965_hw_txq_ctx_free - Free TXQ Context
1875 * Destroy all TX DMA queues and structures
1877 void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv)
1882 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
1883 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
1885 /* Keep-warm buffer */
1886 iwl4965_kw_free(priv);
1890 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
1892 * Does NOT advance any TFD circular buffer read/write indexes
1893 * Does NOT free the TFD itself (which is within circular buffer)
1895 int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
1897 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
1898 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
1899 struct pci_dev *dev = priv->pci_dev;
1904 /* Host command buffers stay mapped in memory, nothing to clean */
1905 if (txq->q.id == IWL_CMD_QUEUE_NUM)
1908 /* Sanity check on number of chunks */
1909 counter = IWL_GET_BITS(*bd, num_tbs);
1910 if (counter > MAX_NUM_OF_TBS) {
1911 IWL_ERROR("Too many chunks: %i\n", counter);
1912 /* @todo issue fatal error, this is quite a serious situation */
1916 /* Unmap chunks, if any.
1917 * TFD info for odd chunks is in a different format than for even chunks. */
1918 for (i = 0; i < counter; i++) {
1925 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1926 (IWL_GET_BITS(bd->pa[index],
1927 tb2_addr_hi20) << 16),
1928 IWL_GET_BITS(bd->pa[index], tb2_len),
1932 pci_unmap_single(dev,
1933 le32_to_cpu(bd->pa[index].tb1_addr),
1934 IWL_GET_BITS(bd->pa[index], tb1_len),
1937 /* Free SKB, if any, for this chunk */
1938 if (txq->txb[txq->q.read_ptr].skb[i]) {
1939 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
1942 txq->txb[txq->q.read_ptr].skb[i] = NULL;
1948 int iwl4965_hw_reg_set_txpower(struct iwl4965_priv *priv, s8 power)
1950 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
1954 static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1967 *res = ((num * 2 + denom) / (denom * 2)) * sign;
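	/*
	 * Example (illustrative): with num = 7, denom = 2 this computes
	 * (7 * 2 + 2) / (2 * 2) = 16 / 4 = 4, i.e. 3.5 rounded to nearest;
	 * with num = 5, denom = 4 it gives 14 / 8 = 1, i.e. 1.25 rounds
	 * down.  The sign is factored out beforehand (by code not shown
	 * here) so the rounding works on magnitudes.
	 */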
1973 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
1975 * Determines power supply voltage compensation for txpower calculations.
1976 * Returns number of 1/2-dB steps to subtract from gain table index,
1977 * to compensate for difference between power supply voltage during
1978 * factory measurements, vs. current power supply voltage.
1980 * Voltage indication is higher for lower voltage.
1981 * Lower voltage requires more gain (lower gain table index).
1983 static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1984 s32 current_voltage)
1988 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
1989 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
1992 iwl4965_math_div_round(current_voltage - eeprom_voltage,
1993 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
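	/*
	 * Example (hypothetical values): with TX_POWER_IWL_VOLTAGE_CODES_PER_03V
	 * assumed to be 7, a reading 10 codes above the factory value gives
	 * comp = round(10 / 7) = 1, i.e. one half-dB step of adjustment; the
	 * range check below guards against results outside [-2, 2]
	 * (handling not shown here).
	 */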
1995 if (current_voltage > eeprom_voltage)
1997 if ((comp < -2) || (comp > 2))
2003 static const struct iwl4965_channel_info *
2004 iwl4965_get_channel_txpower_info(struct iwl4965_priv *priv,
2005 enum ieee80211_band band, u16 channel)
2007 const struct iwl4965_channel_info *ch_info;
2009 ch_info = iwl4965_get_channel_info(priv, band, channel);
2011 if (!is_channel_valid(ch_info))
2017 static s32 iwl4965_get_tx_atten_grp(u16 channel)
2019 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
2020 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
2021 return CALIB_CH_GROUP_5;
2023 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
2024 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
2025 return CALIB_CH_GROUP_1;
2027 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
2028 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
2029 return CALIB_CH_GROUP_2;
2031 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
2032 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
2033 return CALIB_CH_GROUP_3;
2035 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
2036 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
2037 return CALIB_CH_GROUP_4;
2039 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
2043 static u32 iwl4965_get_sub_band(const struct iwl4965_priv *priv, u32 channel)
2047 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2048 if (priv->eeprom.calib_info.band_info[b].ch_from == 0)
2051 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from)
2052 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to))
2059 static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2066 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
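/* Illustrative worked example (hypothetical values, not from the driver):
 * interpolating at x = 40 between (x1, y1) = (36, 100) and (x2, y2) = (48, 88)
 * gives val = round((48 - 40) * (100 - 88) / (48 - 36)) = 8; the (elided)
 * return presumably adds this to y2, giving 88 + 8 = 96, the straight-line
 * value between the two factory-measured points. */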
2072 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
2074 * Interpolates factory measurements from the two sample channels within a
2075 * sub-band, to apply to channel of interest. Interpolation is proportional to
2076 * differences in channel frequencies, which is proportional to differences
2077 * in channel number.
2079 static int iwl4965_interpolate_chan(struct iwl4965_priv *priv, u32 channel,
2080 struct iwl4965_eeprom_calib_ch_info *chan_info)
2085 const struct iwl4965_eeprom_calib_measure *m1;
2086 const struct iwl4965_eeprom_calib_measure *m2;
2087 struct iwl4965_eeprom_calib_measure *omeas;
2091 s = iwl4965_get_sub_band(priv, channel);
2092 if (s >= EEPROM_TX_POWER_BANDS) {
2093 IWL_ERROR("Tx Power can not find channel %d ", channel);
2097 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num;
2098 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num;
2099 chan_info->ch_num = (u8) channel;
2101 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
2102 channel, s, ch_i1, ch_i2);
2104 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2105 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2106 m1 = &(priv->eeprom.calib_info.band_info[s].ch1.
2107 measurements[c][m]);
2108 m2 = &(priv->eeprom.calib_info.band_info[s].ch2.
2109 measurements[c][m]);
2110 omeas = &(chan_info->measurements[c][m]);
2113 (u8) iwl4965_interpolate_value(channel, ch_i1,
2118 (u8) iwl4965_interpolate_value(channel, ch_i1,
2119 m1->gain_idx, ch_i2,
2121 omeas->temperature =
2122 (u8) iwl4965_interpolate_value(channel, ch_i1,
2127 (s8) iwl4965_interpolate_value(channel, ch_i1,
2132 ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
2133 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
2135 ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
2136 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
2138 ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
2139 m1->pa_det, m2->pa_det, omeas->pa_det);
2141 ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
2142 m1->temperature, m2->temperature,
2143 omeas->temperature);
2150 /* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
2151 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
2152 static s32 back_off_table[] = {
2153 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
2154 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
2155 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
2156 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
2160 /* Thermal compensation values for txpower for various frequency ranges ...
2161 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
2162 static struct iwl4965_txpower_comp_entry {
2163 s32 degrees_per_05db_a;
2164 s32 degrees_per_05db_a_denom;
2165 } tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
2166 {9, 2}, /* group 0 5.2, ch 34-43 */
2167 {4, 1}, /* group 1 5.2, ch 44-70 */
2168 {4, 1}, /* group 2 5.2, ch 71-124 */
2169 {4, 1}, /* group 3 5.2, ch 125-200 */
2170 {3, 1} /* group 4 2.4, ch all */
2173 static s32 get_min_power_index(s32 rate_power_index, u32 band)
2176 if ((rate_power_index & 7) <= 4)
2177 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
2179 return MIN_TX_GAIN_INDEX;
2187 static const struct gain_entry gain_table[2][108] = {
2188 /* 5.2GHz power gain index table */
2190 {123, 0x3F}, /* highest txpower */
2299 /* 2.4GHz power gain index table */
2301 {110, 0x3f}, /* highest txpower */
2412 static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 channel,
2413 u8 is_fat, u8 ctrl_chan_high,
2414 struct iwl4965_tx_power_db *tx_power_tbl)
2416 u8 saturation_power;
2418 s32 user_target_power;
2422 s32 current_regulatory;
2423 s32 txatten_grp = CALIB_CH_GROUP_MAX;
2426 const struct iwl4965_channel_info *ch_info = NULL;
2427 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info;
2428 const struct iwl4965_eeprom_calib_measure *measurement;
2431 s32 voltage_compensation;
2432 s32 degrees_per_05db_num;
2433 s32 degrees_per_05db_denom;
2435 s32 temperature_comp[2];
2436 s32 factory_gain_index[2];
2437 s32 factory_actual_pwr[2];
2440 /* Sanity check requested level (dBm) */
2441 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2442 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2443 priv->user_txpower_limit);
2446 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2447 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2448 priv->user_txpower_limit);
2452 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2453 * are used for indexing into txpower table) */
2454 user_target_power = 2 * priv->user_txpower_limit;
2456 /* Get current (RXON) channel, band, width */
2458 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
2460 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2466 /* get txatten group, used to select 1) thermal txpower adjustment
2467 * and 2) mimo txpower balance between Tx chains. */
2468 txatten_grp = iwl4965_get_tx_atten_grp(channel);
2469 if (txatten_grp < 0)
2472 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
2473 channel, txatten_grp);
2482 /* hardware txpower limits ...
2483 * saturation (clipping distortion) txpowers are in half-dBm */
2485 saturation_power = priv->eeprom.calib_info.saturation_power24;
2487 saturation_power = priv->eeprom.calib_info.saturation_power52;
2489 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2490 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
2492 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
2494 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
2497 /* regulatory txpower limits ... reg_limit values are in half-dBm,
2498 * max_power_avg values are in dBm, convert * 2 */
2500 reg_limit = ch_info->fat_max_power_avg * 2;
2502 reg_limit = ch_info->max_power_avg * 2;
2504 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
2505 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
2507 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
2509 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
2512 /* Interpolate txpower calibration values for this channel,
2513 * based on factory calibration tests on spaced channels. */
2514 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2516 /* calculate tx gain adjustment based on power supply voltage */
2517 voltage = priv->eeprom.calib_info.voltage;
2518 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2519 voltage_compensation =
2520 iwl4965_get_voltage_compensation(voltage, init_voltage);
2522 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
2524 voltage, voltage_compensation);
2526 /* get current temperature (Celsius) */
2527 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
2528 current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
2529 current_temp = KELVIN_TO_CELSIUS(current_temp);
2531 /* select thermal txpower adjustment params, based on channel group
2532 * (same frequency group used for mimo txatten adjustment) */
2533 degrees_per_05db_num =
2534 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
2535 degrees_per_05db_denom =
2536 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
2538 /* get per-chain txpower values from factory measurements */
2539 for (c = 0; c < 2; c++) {
2540 measurement = &ch_eeprom_info.measurements[c][1];
2542 /* txgain adjustment (in half-dB steps) based on difference
2543 * between factory and current temperature */
2544 factory_temp = measurement->temperature;
2545 iwl4965_math_div_round((current_temp - factory_temp) *
2546 degrees_per_05db_denom,
2547 degrees_per_05db_num,
2548 &temperature_comp[c]);
2550 factory_gain_index[c] = measurement->gain_idx;
2551 factory_actual_pwr[c] = measurement->actual_pow;
2553 IWL_DEBUG_TXPOWER("chain = %d\n", c);
2554 IWL_DEBUG_TXPOWER("fctry tmp %d, "
2555 "curr tmp %d, comp %d steps\n",
2556 factory_temp, current_temp,
2557 temperature_comp[c]);
2559 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
2560 factory_gain_index[c],
2561 factory_actual_pwr[c]);
2564 /* for each of 33 bit-rates (including 1 for CCK) */
2565 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
2567 union iwl4965_tx_power_dual_stream tx_power;
2569 /* for mimo, reduce each chain's txpower by half
2570 * (3dB, 6 steps), so total output power is regulatory
2573 current_regulatory = reg_limit -
2574 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
2577 current_regulatory = reg_limit;
2581 /* find txpower limit, either hardware or regulatory */
2582 power_limit = saturation_power - back_off_table[i];
2583 if (power_limit > current_regulatory)
2584 power_limit = current_regulatory;
2586 /* reduce user's txpower request if necessary
2587 * for this rate on this channel */
2588 target_power = user_target_power;
2589 if (target_power > power_limit)
2590 target_power = power_limit;
2592 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
2593 i, saturation_power - back_off_table[i],
2594 current_regulatory, user_target_power,
2597 /* for each of 2 Tx chains (radio transmitters) */
2598 for (c = 0; c < 2; c++) {
2603 (s32)le32_to_cpu(priv->card_alive_init.
2604 tx_atten[txatten_grp][c]);
2608 /* calculate index; higher index means lower txpower */
2609 power_index = (u8) (factory_gain_index[c] -
2611 factory_actual_pwr[c]) -
2612 temperature_comp[c] -
2613 voltage_compensation +
2616 /* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
2619 if (power_index < get_min_power_index(i, band))
2620 power_index = get_min_power_index(i, band);
2622 /* adjust 5 GHz index to support negative indexes */
2626 /* CCK, rate 32, reduce txpower for CCK */
2627 if (i == POWER_TABLE_CCK_ENTRY)
2629 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
2631 /* stay within the table! */
2632 if (power_index > 107) {
2633 IWL_WARNING("txpower index %d > 107\n",
2637 if (power_index < 0) {
2638 IWL_WARNING("txpower index %d < 0\n",
2643 /* fill txpower command for this rate/chain */
2644 tx_power.s.radio_tx_gain[c] =
2645 gain_table[band][power_index].radio;
2646 tx_power.s.dsp_predis_atten[c] =
2647 gain_table[band][power_index].dsp;
2649 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
2650 "gain 0x%02x dsp %d\n",
2651 c, atten_value, power_index,
2652 tx_power.s.radio_tx_gain[c],
2653 tx_power.s.dsp_predis_atten[c]);
2654 }/* for each chain */
2656 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
2658 }/* for each rate */
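/* Illustrative recap of the per-chain index arithmetic above (hypothetical
 * numbers, not from the driver, and ignoring the per-group attenuation term):
 * with a factory gain index of 110 measured at 30 half-dBm, a target of
 * 34 half-dBm (17 dBm), 2 steps of thermal comp and 1 step of voltage comp,
 * the index becomes 110 - (34 - 30) - 2 - 1 = 103.  Asking for more power
 * than the factory measurement lowers the index (more gain); the clamps above
 * keep the result within the 0..107 gain table. */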
2664 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
2666 * Uses the active RXON for channel, band, and characteristics (fat, high)
2667 * The power limit is taken from priv->user_txpower_limit.
2669 int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv)
2671 struct iwl4965_txpowertable_cmd cmd = { 0 };
2675 u8 ctrl_chan_high = 0;
2677 if (test_bit(STATUS_SCANNING, &priv->status)) {
2678 /* If this gets hit a lot, switch it to a BUG() and catch
2679 * the stack trace to find out who is calling this during
2681 IWL_WARNING("TX Power requested while scanning!\n");
2685 band = priv->band == IEEE80211_BAND_2GHZ;
2687 is_fat = is_fat_channel(priv->active_rxon.flags);
2690 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2694 cmd.channel = priv->active_rxon.channel;
2696 rc = iwl4965_fill_txpower_tbl(priv, band,
2697 le16_to_cpu(priv->active_rxon.channel),
2698 is_fat, ctrl_chan_high, &cmd.tx_power);
2702 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2706 int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel)
2711 u8 ctrl_chan_high = 0;
2712 struct iwl4965_channel_switch_cmd cmd = { 0 };
2713 const struct iwl4965_channel_info *ch_info;
2715 band = priv->band == IEEE80211_BAND_2GHZ;
2717 ch_info = iwl4965_get_channel_info(priv, priv->band, channel);
2719 is_fat = is_fat_channel(priv->staging_rxon.flags);
2722 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2726 cmd.expect_beacon = 0;
2727 cmd.channel = cpu_to_le16(channel);
2728 cmd.rxon_flags = priv->active_rxon.flags;
2729 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2730 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2732 cmd.expect_beacon = is_channel_radar(ch_info);
2734 cmd.expect_beacon = 1;
2736 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2737 ctrl_chan_high, &cmd.tx_power);
2739 IWL_DEBUG_11H("error %d filling txpower_tbl\n", rc);
2743 rc = iwl4965_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
2747 #define RTS_HCCA_RETRY_LIMIT 3
2748 #define RTS_DFAULT_RETRY_LIMIT 60
2750 void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv,
2751 struct iwl4965_cmd *cmd,
2752 struct ieee80211_tx_control *ctrl,
2753 struct ieee80211_hdr *hdr, int sta_id,
2756 struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
2757 u8 rts_retry_limit = 0;
2758 u8 data_retry_limit = 0;
2759 u16 fc = le16_to_cpu(hdr->frame_control);
2762 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2764 rate_plcp = iwl4965_rates[rate_idx].plcp;
2766 rts_retry_limit = (is_hcca) ?
2767 RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
2769 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
2770 rate_flags |= RATE_MCS_CCK_MSK;
2773 if (ieee80211_is_probe_response(fc)) {
2774 data_retry_limit = 3;
2775 if (data_retry_limit < rts_retry_limit)
2776 rts_retry_limit = data_retry_limit;
2778 data_retry_limit = IWL_DEFAULT_TX_RETRY;
2780 if (priv->data_retry_limit != -1)
2781 data_retry_limit = priv->data_retry_limit;
2784 if (ieee80211_is_data(fc)) {
2785 tx->initial_rate_index = 0;
2786 tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
2788 switch (fc & IEEE80211_FCTL_STYPE) {
2789 case IEEE80211_STYPE_AUTH:
2790 case IEEE80211_STYPE_DEAUTH:
2791 case IEEE80211_STYPE_ASSOC_REQ:
2792 case IEEE80211_STYPE_REASSOC_REQ:
2793 if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
2794 tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2795 tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
2802 /* Alternate between antenna A and B for successive frames */
2803 if (priv->use_ant_b_for_management_frame) {
2804 priv->use_ant_b_for_management_frame = 0;
2805 rate_flags |= RATE_MCS_ANT_B_MSK;
2807 priv->use_ant_b_for_management_frame = 1;
2808 rate_flags |= RATE_MCS_ANT_A_MSK;
2812 tx->rts_retry_limit = rts_retry_limit;
2813 tx->data_retry_limit = data_retry_limit;
2814 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
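/* Illustrative example (hypothetical frame, not from the driver): for a CCK
 * probe response forced onto antenna B, rate_plcp would be
 * iwl4965_rates[IWL_RATE_11M_INDEX].plcp and rate_flags would carry
 * RATE_MCS_CCK_MSK | RATE_MCS_ANT_B_MSK, so rate_n_flags ends up combining
 * the PLCP rate with the modulation and antenna-selection bits. */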
2817 int iwl4965_hw_get_rx_read(struct iwl4965_priv *priv)
2819 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;
2821 return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num);
2824 int iwl4965_hw_get_temperature(struct iwl4965_priv *priv)
2826 return priv->temperature;
2829 unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv,
2830 struct iwl4965_frame *frame, u8 rate)
2832 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
2833 unsigned int frame_size;
2835 tx_beacon_cmd = &frame->u.beacon;
2836 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2838 tx_beacon_cmd->tx.sta_id = IWL4965_BROADCAST_ID;
2839 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2841 frame_size = iwl4965_fill_beacon_frame(priv,
2842 tx_beacon_cmd->frame,
2843 iwl4965_broadcast_addr,
2844 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2846 BUG_ON(frame_size > MAX_MPDU_SIZE);
2847 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2849 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
2850 tx_beacon_cmd->tx.rate_n_flags =
2851 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
2853 tx_beacon_cmd->tx.rate_n_flags =
2854 iwl4965_hw_set_rate_n_flags(rate, 0);
2856 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2857 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
2858 return (sizeof(*tx_beacon_cmd) + frame_size);
2862 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
2863 * given Tx queue, and enable the DMA channel used for that queue.
2865 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
2866 * channels supported in hardware.
2868 int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
2871 unsigned long flags;
2872 int txq_id = txq->q.id;
2874 spin_lock_irqsave(&priv->lock, flags);
2875 rc = iwl4965_grab_nic_access(priv);
2877 spin_unlock_irqrestore(&priv->lock, flags);
2881 /* Circular buffer (TFD queue in DRAM) physical base address */
2882 iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
2883 txq->q.dma_addr >> 8);
2885 /* Enable DMA channel, using same id as for TFD queue */
2886 iwl4965_write_direct32(
2887 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2888 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2889 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
2890 iwl4965_release_nic_access(priv);
2891 spin_unlock_irqrestore(&priv->lock, flags);
2896 int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *ptr,
2897 dma_addr_t addr, u16 len)
2900 struct iwl4965_tfd_frame *tfd = ptr;
2901 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2903 /* Each TFD can point to a maximum of 20 Tx buffers */
2904 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2905 IWL_ERROR("Error can not send more than %d chunks\n",
2910 index = num_tbs / 2;
2911 is_odd = num_tbs & 0x1;
2914 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
2915 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
2916 iwl_get_dma_hi_address(addr));
2917 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
2919 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
2920 (u32) (addr & 0xffff));
2921 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
2922 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
2925 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
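/* Illustrative walk-through of the packing above (numbers hypothetical):
 * each pa[] entry holds a pair of buffers.  The 1st attached buffer
 * (num_tbs = 0) goes into pa[0] as tb1, the 2nd (num_tbs = 1) into pa[0] as
 * tb2, the 3rd into pa[1] as tb1, and so on, up to MAX_NUM_OF_TBS buffers per
 * TFD.  tb1 keeps a full 32-bit address plus separate high bits, while tb2
 * splits its address into 16 low and 20 high bits. */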
2930 static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv)
2932 u16 hw_version = priv->eeprom.board_revision_4965;
2934 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
2935 ((hw_version >> 8) & 0x0F),
2936 ((hw_version >> 8) >> 4), (hw_version & 0x00FF));
2938 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
2939 priv->eeprom.board_pba_number_4965);
2942 #define IWL_TX_CRC_SIZE 4
2943 #define IWL_TX_DELIMITER_SIZE 4
2946 * iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array
2948 int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2949 struct iwl4965_tx_queue *txq, u16 byte_cnt)
2952 int txq_id = txq->q.id;
2953 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt;
2955 if (txq->need_update == 0)
2958 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2960 /* Set up byte count within first 256 entries */
2961 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2962 tfd_offset[txq->q.write_ptr], byte_cnt, len);
2964 /* If within first 64 entries, duplicate at end */
2965 if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
2966 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2967 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
2974 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
2976 * Selects how many and which Rx receivers/antennas/chains to use.
2977 * This should not be used for the scan command ... it puts data in the wrong place.
2979 void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
2981 u8 is_single = is_single_stream(priv);
2982 u8 idle_state, rx_state;
2984 priv->staging_rxon.rx_chain = 0;
2985 rx_state = idle_state = 3;
2987 /* Tell uCode which antennas are actually connected.
2988 * Before first association, we assume all antennas are connected.
2989 * Just after first association, iwl4965_noise_calibration()
2990 * checks which antennas actually *are* connected. */
2991 priv->staging_rxon.rx_chain |=
2992 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
2994 /* How many receivers should we use? */
2995 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
2996 priv->staging_rxon.rx_chain |=
2997 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
2998 priv->staging_rxon.rx_chain |=
2999 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3001 if (!is_single && (rx_state >= 2) &&
3002 !test_bit(STATUS_POWER_PMI, &priv->status))
3003 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3005 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3007 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
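/* Illustrative example (hypothetical counts, not from the driver): with all
 * three antennas marked valid and iwl4965_get_rx_chain_counter() reporting
 * rx_state = 2 and idle_state = 1, the staging rx_chain field ends up with
 * the valid-antenna mask at RXON_RX_CHAIN_VALID_POS, a MIMO count of 2, an
 * idle count of 1, and (when not in power-save) the MIMO-force bit set. */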
3011 * sign_extend - Sign extend a value using specified bit as sign-bit
3013 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
3014 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
3016 * @param oper value to sign extend
3017 * @param index 0 based bit index (0<=index<32) to sign bit
3019 static s32 sign_extend(u32 oper, int index)
3021 u8 shift = 31 - index;
3023 return (s32)(oper << shift) >> shift;
3027 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
3028 * @statistics: Provides the temperature reading from the uCode
3030 * A return of <0 indicates bogus data in the statistics
3032 int iwl4965_get_temperature(const struct iwl4965_priv *priv)
3039 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
3040 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
3041 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
3042 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
3043 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
3044 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
3045 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
3047 IWL_DEBUG_TEMP("Running temperature calibration\n");
3048 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
3049 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
3050 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
3051 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
3055 * Temperature is only 23 bits, so sign extend out to 32.
3057 * NOTE If we haven't received a statistics notification yet
3058 * with an updated temperature, use R4 provided to us in the
3059 * "initialize" ALIVE response.
3061 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
3062 vt = sign_extend(R4, 23);
3065 le32_to_cpu(priv->statistics.general.temperature), 23);
3067 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
3071 IWL_ERROR("Calibration conflict R1 == R3\n");
3075 /* Calculate temperature in degrees Kelvin, adjust by 97%.
3076 * Add offset to center the adjustment around 0 degrees Centigrade. */
3077 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
3078 temperature /= (R3 - R1);
3079 temperature = (temperature * 97) / 100 +
3080 TEMPERATURE_CALIB_KELVIN_OFFSET;
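/* Written out as a single expression (same math as the steps above,
 * illustrative only, ignoring integer truncation):
 *   T[kelvin] ~= (97 * TEMPERATURE_CALIB_A_VAL * (vt - R2))
 *                / (100 * (R3 - R1)) + TEMPERATURE_CALIB_KELVIN_OFFSET
 * i.e. a linear map of the raw reading vt through the factory calibration
 * points, scaled by 97% and shifted to center the adjustment at 0 C. */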
3082 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
3083 KELVIN_TO_CELSIUS(temperature));
3088 /* Adjust Txpower only if temperature variance is greater than threshold. */
3089 #define IWL_TEMPERATURE_THRESHOLD 3
3092 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
3094 * If the temperature has changed sufficiently, then a recalibration
3097 * Assumes caller will replace priv->last_temperature once calibration
3100 static int iwl4965_is_temp_calib_needed(struct iwl4965_priv *priv)
3104 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
3105 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
3109 temp_diff = priv->temperature - priv->last_temperature;
3111 /* get absolute value */
3112 if (temp_diff < 0) {
3113 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
3114 temp_diff = -temp_diff;
3115 } else if (temp_diff == 0)
3116 IWL_DEBUG_POWER("Same temp, \n");
3118 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
3120 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
3121 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
3125 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
3130 /* Calculate noise level, based on measurements during network silence just
3131 * before the arriving beacon. This measurement can be done only if we know
3132 * exactly when to expect beacons, therefore only when we're associated. */
3133 static void iwl4965_rx_calc_noise(struct iwl4965_priv *priv)
3135 struct statistics_rx_non_phy *rx_info
3136 = &(priv->statistics.rx.general);
3137 int num_active_rx = 0;
3138 int total_silence = 0;
3140 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3142 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3144 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3146 if (bcn_silence_a) {
3147 total_silence += bcn_silence_a;
3150 if (bcn_silence_b) {
3151 total_silence += bcn_silence_b;
3154 if (bcn_silence_c) {
3155 total_silence += bcn_silence_c;
3159 /* Average among active antennas */
3161 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3163 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3165 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3166 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3167 priv->last_rx_noise);
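/* Illustrative example (hypothetical readings, not from the driver): with
 * beacon-silence values of 25 and 31 on chains A and B and 0 on chain C,
 * only two chains count as active, so last_rx_noise = (25 + 31) / 2 - 107 =
 * -79 dBm; a chain reporting 0 is simply left out of the average. */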
3170 void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
3172 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3176 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
3177 (int)sizeof(priv->statistics), pkt->len);
3179 change = ((priv->statistics.general.temperature !=
3180 pkt->u.stats.general.temperature) ||
3181 ((priv->statistics.flag &
3182 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
3183 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
3185 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
3187 set_bit(STATUS_STATISTICS, &priv->status);
3189 /* Reschedule the statistics timer to occur in
3190 * REG_RECALIB_PERIOD seconds to ensure we get a
3191 * thermal update even if the uCode doesn't give
3193 mod_timer(&priv->statistics_periodic, jiffies +
3194 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
3196 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3197 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
3198 iwl4965_rx_calc_noise(priv);
3199 #ifdef CONFIG_IWL4965_SENSITIVITY
3200 queue_work(priv->workqueue, &priv->sensitivity_work);
3204 /* If the hardware hasn't reported a change in
3205 * temperature then don't bother computing a
3206 * calibrated temperature value */
3210 temp = iwl4965_get_temperature(priv);
3214 if (priv->temperature != temp) {
3215 if (priv->temperature)
3216 IWL_DEBUG_TEMP("Temperature changed "
3217 "from %dC to %dC\n",
3218 KELVIN_TO_CELSIUS(priv->temperature),
3219 KELVIN_TO_CELSIUS(temp));
3221 IWL_DEBUG_TEMP("Temperature "
3222 "initialized to %dC\n",
3223 KELVIN_TO_CELSIUS(temp));
3226 priv->temperature = temp;
3227 set_bit(STATUS_TEMPERATURE, &priv->status);
3229 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
3230 iwl4965_is_temp_calib_needed(priv))
3231 queue_work(priv->workqueue, &priv->txpower_work);
3234 static void iwl4965_add_radiotap(struct iwl4965_priv *priv,
3235 struct sk_buff *skb,
3236 struct iwl4965_rx_phy_res *rx_start,
3237 struct ieee80211_rx_status *stats,
3240 s8 signal = stats->ssi;
3242 int rate = stats->rate_idx;
3243 u64 tsf = stats->mactime;
3244 __le16 phy_flags_hw = rx_start->phy_flags;
3245 struct iwl4965_rt_rx_hdr {
3246 struct ieee80211_radiotap_header rt_hdr;
3247 __le64 rt_tsf; /* TSF */
3248 u8 rt_flags; /* radiotap packet flags */
3249 u8 rt_rate; /* rate in 500kb/s */
3250 __le16 rt_channelMHz; /* channel in MHz */
3251 __le16 rt_chbitmask; /* channel bitfield */
3252 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
3254 u8 rt_antenna; /* antenna number */
3255 } __attribute__ ((packed)) *iwl4965_rt;
3257 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
3258 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
3259 if (net_ratelimit())
3260 printk(KERN_ERR "not enough headroom [%d] for "
3261 "radiotap head [%zd]\n",
3262 skb_headroom(skb), sizeof(*iwl4965_rt));
3266 /* put radiotap header in front of 802.11 header and data */
3267 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
3269 /* initialise radiotap header */
3270 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3271 iwl4965_rt->rt_hdr.it_pad = 0;
3273 /* total header + data */
3274 put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
3275 &iwl4965_rt->rt_hdr.it_len);
3277 /* Indicate all the fields we add to the radiotap header */
3278 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3279 (1 << IEEE80211_RADIOTAP_FLAGS) |
3280 (1 << IEEE80211_RADIOTAP_RATE) |
3281 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3282 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3283 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3284 (1 << IEEE80211_RADIOTAP_ANTENNA)),
3285 &iwl4965_rt->rt_hdr.it_present);
3287 /* Zero the flags, we'll add to them as we go */
3288 iwl4965_rt->rt_flags = 0;
3290 put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);
3292 iwl4965_rt->rt_dbmsignal = signal;
3293 iwl4965_rt->rt_dbmnoise = noise;
3295 /* Convert the channel frequency and set the flags */
3296 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
3297 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3298 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3299 IEEE80211_CHAN_5GHZ),
3300 &iwl4965_rt->rt_chbitmask);
3301 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3302 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
3303 IEEE80211_CHAN_2GHZ),
3304 &iwl4965_rt->rt_chbitmask);
3306 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
3307 IEEE80211_CHAN_2GHZ),
3308 &iwl4965_rt->rt_chbitmask);
3311 iwl4965_rt->rt_rate = 0;
3313 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
3318 * It seems that the antenna field in the phy flags value
3319 * is actually a bitfield. This is undefined by radiotap,
3320 * it wants an actual antenna number but I always get "7"
3321 * for most legacy frames I receive indicating that the
3322 * same frame was received on all three RX chains.
3324 * I think this field should be removed in favour of a
3325 * new 802.11n radiotap field "RX chains" that is defined
3328 iwl4965_rt->rt_antenna =
3329 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3331 /* set the preamble flag if appropriate */
3332 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3333 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3335 stats->flag |= RX_FLAG_RADIOTAP;
3338 static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
3340 struct iwl4965_rx_mem_buffer *rxb,
3341 struct ieee80211_rx_status *stats)
3343 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
3344 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3345 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
3346 struct ieee80211_hdr *hdr;
3349 unsigned int skblen;
3352 if (!include_phy && priv->last_phy_res[0])
3353 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3356 IWL_ERROR("MPDU frame without a PHY data\n");
3360 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
3361 rx_start->cfg_phy_cnt);
3363 len = le16_to_cpu(rx_start->byte_count);
3365 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
3366 sizeof(struct iwl4965_rx_phy_res) +
3367 rx_start->cfg_phy_cnt + len);
3370 struct iwl4965_rx_mpdu_res_start *amsdu =
3371 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3373 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
3374 sizeof(struct iwl4965_rx_mpdu_res_start));
3375 len = le16_to_cpu(amsdu->byte_count);
3376 rx_start->byte_count = amsdu->byte_count;
3377 rx_end = (__le32 *) (((u8 *) hdr) + len);
3379 if (len > priv->hw_setting.max_pkt_size || len < 16) {
3380 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3384 ampdu_status = le32_to_cpu(*rx_end);
3385 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3387 /* start from MAC */
3388 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3389 skb_put(rxb->skb, len); /* end where data ends */
3391 /* We only process data packets if the interface is open */
3392 if (unlikely(!priv->is_open)) {
3393 IWL_DEBUG_DROP_LIMIT
3394 ("Dropping packet while interface is not open.\n");
3399 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3401 if (iwl4965_param_hwcrypto)
3402 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
3404 if (priv->add_radiotap)
3405 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
3407 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3408 priv->alloc_rxb_skb--;
3411 priv->led_packets += len;
3412 iwl4965_setup_activity_timer(priv);
3416 /* Calc max signal level (dBm) among 3 possible receivers */
3417 static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3419 /* data from PHY/DSP regarding signal strength, etc.,
3420 * contents are always there, not configurable by host. */
3421 struct iwl4965_rx_non_cfg_phy *ncphy =
3422 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3423 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3426 u32 valid_antennae =
3427 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3428 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3432 /* Find max rssi among 3 possible receivers.
3433 * These values are measured by the digital signal processor (DSP).
3434 * They should stay fairly constant even as the signal strength varies,
3435 * if the radio's automatic gain control (AGC) is working right.
3436 * AGC value (see below) will provide the "interesting" info. */
3437 for (i = 0; i < 3; i++)
3438 if (valid_antennae & (1 << i))
3439 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3441 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3442 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3445 /* dBm = max_rssi dB - agc dB - constant.
3446 * Higher AGC (higher radio gain) means lower signal. */
3447 return (max_rssi - agc - IWL_RSSI_OFFSET);
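/* Illustrative example (hypothetical values, not from the driver): if the
 * strongest DSP reading is max_rssi = 60 and the AGC reports 45 dB of gain,
 * the estimate is 60 - 45 - IWL_RSSI_OFFSET dBm; more radio gain (a larger
 * agc) for the same DSP reading means a weaker received signal. */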
3450 #ifdef CONFIG_IWL4965_HT
3452 /* Parsed Information Elements */
3453 struct ieee802_11_elems {
3463 u8 ht_cap_param_len;
3465 u8 ht_extra_param_len;
3468 static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems)
3474 memset(elems, 0, sizeof(*elems));
3487 case WLAN_EID_DS_PARAMS:
3488 elems->ds_params = pos;
3489 elems->ds_params_len = elen;
3493 elems->tim_len = elen;
3495 case WLAN_EID_IBSS_PARAMS:
3496 elems->ibss_params = pos;
3497 elems->ibss_params_len = elen;
3499 case WLAN_EID_ERP_INFO:
3500 elems->erp_info = pos;
3501 elems->erp_info_len = elen;
3503 case WLAN_EID_HT_CAPABILITY:
3504 elems->ht_cap_param = pos;
3505 elems->ht_cap_param_len = elen;
3507 case WLAN_EID_HT_EXTRA_INFO:
3508 elems->ht_extra_param = pos;
3509 elems->ht_extra_param_len = elen;
3523 void iwl4965_init_ht_hw_capab(struct ieee80211_ht_info *ht_info,
3524 enum ieee80211_band band)
3527 memset(ht_info->supp_mcs_set, 0, 16);
3529 ht_info->ht_supported = 1;
3531 if (band == IEEE80211_BAND_5GHZ) {
3532 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3533 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3534 ht_info->supp_mcs_set[4] = 0x01;
3536 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3537 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3538 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3539 (IWL_MIMO_PS_NONE << 2));
3540 if (iwl4965_param_amsdu_size_8K) {
3541 printk(KERN_DEBUG "iwl4965 in A-MSDU 8K support mode\n");
3542 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3545 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3546 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3548 ht_info->supp_mcs_set[0] = 0xFF;
3549 ht_info->supp_mcs_set[1] = 0xFF;
3551 #endif /* CONFIG_IWL4965_HT */
3553 static void iwl4965_sta_modify_ps_wake(struct iwl4965_priv *priv, int sta_id)
3555 unsigned long flags;
3557 spin_lock_irqsave(&priv->sta_lock, flags);
3558 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3559 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3560 priv->stations[sta_id].sta.sta.modify_mask = 0;
3561 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3562 spin_unlock_irqrestore(&priv->sta_lock, flags);
3564 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3567 static void iwl4965_update_ps_mode(struct iwl4965_priv *priv, u16 ps_bit, u8 *addr)
3569 /* FIXME: need locking over ps_status ??? */
3570 u8 sta_id = iwl4965_hw_find_station(priv, addr);
3572 if (sta_id != IWL_INVALID_STATION) {
3573 u8 sta_awake = priv->stations[sta_id].
3574 ps_status == STA_PS_STATUS_WAKE;
3576 if (sta_awake && ps_bit)
3577 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3578 else if (!sta_awake && !ps_bit) {
3579 iwl4965_sta_modify_ps_wake(priv, sta_id);
3580 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3584 #ifdef CONFIG_IWL4965_DEBUG
3587 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3589 * You may hack this function to show different aspects of received frames,
3590 * including selective frame dumps.
3591 * group100 parameter selects whether to show 1 out of 100 good frames.
3593 * TODO: This was originally written for 3945, need to audit for
3594 * proper operation with 4965.
3596 static void iwl4965_dbg_report_frame(struct iwl4965_priv *priv,
3597 struct iwl4965_rx_packet *pkt,
3598 struct ieee80211_hdr *header, int group100)
3601 u32 print_summary = 0;
3602 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
3619 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
3620 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
3621 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3622 u8 *data = IWL_RX_DATA(pkt);
3624 if (likely(!(iwl4965_debug_level & IWL_DL_RX)))
3628 fc = le16_to_cpu(header->frame_control);
3629 seq_ctl = le16_to_cpu(header->seq_ctrl);
3632 channel = le16_to_cpu(rx_hdr->channel);
3633 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
3634 rate_sym = rx_hdr->rate;
3635 length = le16_to_cpu(rx_hdr->len);
3637 /* end-of-frame status and timestamp */
3638 status = le32_to_cpu(rx_end->status);
3639 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
3640 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
3641 tsf = le64_to_cpu(rx_end->timestamp);
3643 /* signal statistics */
3644 rssi = rx_stats->rssi;
3645 agc = rx_stats->agc;
3646 sig_avg = le16_to_cpu(rx_stats->sig_avg);
3647 noise_diff = le16_to_cpu(rx_stats->noise_diff);
3649 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
3651 /* if data frame is to us and all is good,
3652 * (optionally) print summary for only 1 out of every 100 */
3653 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
3654 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
3657 print_summary = 1; /* print each frame */
3658 else if (priv->framecnt_to_us < 100) {
3659 priv->framecnt_to_us++;
3662 priv->framecnt_to_us = 0;
3667 /* print summary for all other frames */
3671 if (print_summary) {
3677 title = "100Frames";
3678 else if (fc & IEEE80211_FCTL_RETRY)
3680 else if (ieee80211_is_assoc_response(fc))
3682 else if (ieee80211_is_reassoc_response(fc))
3684 else if (ieee80211_is_probe_response(fc)) {
3686 print_dump = 1; /* dump frame contents */
3687 } else if (ieee80211_is_beacon(fc)) {
3689 print_dump = 1; /* dump frame contents */
3690 } else if (ieee80211_is_atim(fc))
3692 else if (ieee80211_is_auth(fc))
3694 else if (ieee80211_is_deauth(fc))
3696 else if (ieee80211_is_disassoc(fc))
3701 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
3702 if (unlikely(rate_idx == -1))
3705 bitrate = iwl4965_rates[rate_idx].ieee / 2;
3707 /* print frame summary.
3708 * MAC addresses show just the last byte (for brevity),
3709 * but you can hack it to show more, if you'd like to. */
3711 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
3712 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
3713 title, fc, header->addr1[5],
3714 length, rssi, channel, bitrate);
3716 /* src/dst addresses assume managed mode */
3717 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
3718 "src=0x%02x, rssi=%u, tim=%lu usec, "
3719 "phy=0x%02x, chnl=%d\n",
3720 title, fc, header->addr1[5],
3721 header->addr3[5], rssi,
3722 tsf_low - priv->scan_start_tsf,
3723 phy_flags, channel);
3727 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
3730 static inline void iwl4965_dbg_report_frame(struct iwl4965_priv *priv,
3731 struct iwl4965_rx_packet *pkt,
3732 struct ieee80211_hdr *header,
3739 #define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
3741 /* Called for REPLY_4965_RX (legacy ABG frames), or
3742 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3743 static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
3744 struct iwl4965_rx_mem_buffer *rxb)
3746 struct ieee80211_hdr *header;
3747 struct ieee80211_rx_status rx_status;
3748 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3749 /* Use phy data (Rx signal strength, etc.) contained within
3750 * this rx packet for legacy frames,
3751 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3752 int include_phy = (pkt->hdr.cmd == REPLY_4965_RX);
3753 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3754 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3755 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3757 unsigned int len = 0;
3761 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
3762 rx_status.freq = ieee80211chan2mhz(le16_to_cpu(rx_start->channel));
3763 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3764 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
3765 rx_status.rate_idx = iwl4965_hwrate_to_plcp_idx(
3766 le32_to_cpu(rx_start->rate_n_flags));
3768 if (rx_status.band == IEEE80211_BAND_5GHZ)
3769 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3771 rx_status.antenna = 0;
3774 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3776 ("dsp size out of range [0,20]: "
3777 "%d/n", rx_start->cfg_phy_cnt);
3782 if (priv->last_phy_res[0])
3783 rx_start = (struct iwl4965_rx_phy_res *)
3784 &priv->last_phy_res[1];
3790 IWL_ERROR("MPDU frame without a PHY data\n");
3795 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3796 + rx_start->cfg_phy_cnt);
3798 len = le16_to_cpu(rx_start->byte_count);
3799 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
3800 sizeof(struct iwl4965_rx_phy_res) + len);
3802 struct iwl4965_rx_mpdu_res_start *amsdu =
3803 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3805 header = (void *)(pkt->u.raw +
3806 sizeof(struct iwl4965_rx_mpdu_res_start));
3807 len = le16_to_cpu(amsdu->byte_count);
3808 rx_end = (__le32 *) (pkt->u.raw +
3809 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
3812 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
3813 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
3814 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
3815 le32_to_cpu(*rx_end));
3819 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3821 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
3822 rx_status.ssi = iwl4965_calc_rssi(rx_start);
3824 /* Meaningful noise values are available only from beacon statistics,
3825 * which are gathered only when associated, and indicate noise
3826 * only for the associated network channel ...
3827 * Ignore these noise values while scanning (other channels) */
3828 if (iwl4965_is_associated(priv) &&
3829 !test_bit(STATUS_SCANNING, &priv->status)) {
3830 rx_status.noise = priv->last_rx_noise;
3831 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
3834 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3835 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
3838 /* Reset beacon noise level if not associated. */
3839 if (!iwl4965_is_associated(priv))
3840 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3842 /* Set "1" to report good data frames in groups of 100 */
3843 /* FIXME: need to optimize the call: */
3844 iwl4965_dbg_report_frame(priv, pkt, header, 1);
3846 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
3847 rx_status.ssi, rx_status.noise, rx_status.signal,
3850 network_packet = iwl4965_is_network_packet(priv, header);
3851 if (network_packet) {
3852 priv->last_rx_rssi = rx_status.ssi;
3853 priv->last_beacon_time = priv->ucode_beacon_time;
3854 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3857 fc = le16_to_cpu(header->frame_control);
3858 switch (fc & IEEE80211_FCTL_FTYPE) {
3859 case IEEE80211_FTYPE_MGMT:
3861 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3862 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3864 switch (fc & IEEE80211_FCTL_STYPE) {
3865 case IEEE80211_STYPE_PROBE_RESP:
3866 case IEEE80211_STYPE_BEACON:
3867 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA &&
3868 !compare_ether_addr(header->addr2, priv->bssid)) ||
3869 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
3870 !compare_ether_addr(header->addr3, priv->bssid))) {
3871 struct ieee80211_mgmt *mgmt =
3872 (struct ieee80211_mgmt *)header;
3874 le64_to_cpu(mgmt->u.beacon.timestamp);
3876 priv->timestamp0 = timestamp & 0xFFFFFFFF;
3878 (timestamp >> 32) & 0xFFFFFFFF;
3879 priv->beacon_int = le16_to_cpu(
3880 mgmt->u.beacon.beacon_int);
3881 if (priv->call_post_assoc_from_beacon &&
3882 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
3883 priv->call_post_assoc_from_beacon = 0;
3884 queue_work(priv->workqueue,
3885 &priv->post_associate.work);
3890 case IEEE80211_STYPE_ACTION:
3894 * TODO: Use the new callback function from
3895 * mac80211 instead of sniffing these packets.
3897 case IEEE80211_STYPE_ASSOC_RESP:
3898 case IEEE80211_STYPE_REASSOC_RESP:
3899 if (network_packet) {
3900 #ifdef CONFIG_IWL4965_HT
3902 struct ieee802_11_elems elems;
3903 #endif /*CONFIG_IWL4965_HT */
3904 struct ieee80211_mgmt *mgnt =
3905 (struct ieee80211_mgmt *)header;
3907 /* We have just associated, give some
3908 * time for the 4-way handshake if
3909 * any. Don't start scan too early. */
3910 priv->next_scan_jiffies = jiffies +
3911 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
3913 priv->assoc_id = (~((1 << 15) | (1 << 14))
3914 & le16_to_cpu(mgnt->u.assoc_resp.aid));
3915 priv->assoc_capability =
3917 mgnt->u.assoc_resp.capab_info);
3918 #ifdef CONFIG_IWL4965_HT
3919 pos = mgnt->u.assoc_resp.variable;
3920 if (!parse_elems(pos,
3921 len - (pos - (u8 *) mgnt),
3923 if (elems.ht_extra_param &&
3927 #endif /*CONFIG_IWL4965_HT */
3928 /* assoc_id of 0 means no association */
3929 if (!priv->assoc_id)
3931 if (priv->beacon_int)
3932 queue_work(priv->workqueue,
3933 &priv->post_associate.work);
3935 priv->call_post_assoc_from_beacon = 1;
3940 case IEEE80211_STYPE_PROBE_REQ:
3941 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
3942 !iwl4965_is_associated(priv)) {
3943 DECLARE_MAC_BUF(mac1);
3944 DECLARE_MAC_BUF(mac2);
3945 DECLARE_MAC_BUF(mac3);
3947 IWL_DEBUG_DROP("Dropping (non network): "
3949 print_mac(mac1, header->addr1),
3950 print_mac(mac2, header->addr2),
3951 print_mac(mac3, header->addr3));
3955 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
3958 case IEEE80211_FTYPE_CTL:
3959 #ifdef CONFIG_IWL4965_HT
3960 switch (fc & IEEE80211_FCTL_STYPE) {
3961 case IEEE80211_STYPE_BACK_REQ:
3962 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
3963 iwl4965_handle_data_packet(priv, 0, include_phy,
3972 case IEEE80211_FTYPE_DATA: {
3973 DECLARE_MAC_BUF(mac1);
3974 DECLARE_MAC_BUF(mac2);
3975 DECLARE_MAC_BUF(mac3);
3977 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3978 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3981 if (unlikely(!network_packet))
3982 IWL_DEBUG_DROP("Dropping (non network): "
3984 print_mac(mac1, header->addr1),
3985 print_mac(mac2, header->addr2),
3986 print_mac(mac3, header->addr3));
3987 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
3988 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
3989 print_mac(mac1, header->addr1),
3990 print_mac(mac2, header->addr2),
3991 print_mac(mac3, header->addr3));
3993 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
4003 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4004 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4005 static void iwl4965_rx_reply_rx_phy(struct iwl4965_priv *priv,
4006 struct iwl4965_rx_mem_buffer *rxb)
4008 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4009 priv->last_phy_res[0] = 1;
4010 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4011 sizeof(struct iwl4965_rx_phy_res));
4014 static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
4015 struct iwl4965_rx_mem_buffer *rxb)
4018 #ifdef CONFIG_IWL4965_SENSITIVITY
4019 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4020 struct iwl4965_missed_beacon_notif *missed_beacon;
4022 missed_beacon = &pkt->u.missed_beacon;
4023 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4024 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4025 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4026 le32_to_cpu(missed_beacon->total_missed_becons),
4027 le32_to_cpu(missed_beacon->num_recvd_beacons),
4028 le32_to_cpu(missed_beacon->num_expected_beacons));
4029 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4030 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4031 queue_work(priv->workqueue, &priv->sensitivity_work);
4033 #endif /*CONFIG_IWL4965_SENSITIVITY*/
4036 #ifdef CONFIG_IWL4965_HT
4039 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4041 static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
4042 int sta_id, int tid)
4044 unsigned long flags;
4046 /* Remove "disable" flag, to enable Tx for this TID */
4047 spin_lock_irqsave(&priv->sta_lock, flags);
4048 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4049 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4050 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4051 spin_unlock_irqrestore(&priv->sta_lock, flags);
4053 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4057 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4059 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4060 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4062 static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4063 struct iwl4965_ht_agg *agg,
4064 struct iwl4965_compressed_ba_resp*
4069 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4070 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4073 struct ieee80211_tx_status *tx_status;
4075 if (unlikely(!agg->wait_for_ba)) {
4076 IWL_ERROR("Received BA when not expected\n");
4080 /* Mark that the expected block-ack response arrived */
4081 agg->wait_for_ba = 0;
4082 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
4084 /* Calculate shift to align block-ack bits with our Tx window bits */
4085 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
4086 if (sh < 0) /* tbd: something is wrong with indices */
4089 /* don't use 64-bit values for now */
4090 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
4092 if (agg->frame_count > (64 - sh)) {
4093 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
4097 /* check for success or failure according to the
4098 * transmitted bitmap and block-ack bitmap */
4099 bitmap &= agg->bitmap;
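/* Illustrative example (hypothetical values, not from the driver): if the
 * driver's window starts at start_idx = 10 but the BA's starting sequence
 * maps to index 8, then sh = 2 and the device bitmap is shifted right by two
 * so that bit 0 lines up with the first frame the driver sent; ANDing with
 * agg->bitmap then leaves a 1 only for frames that were both transmitted and
 * acknowledged. */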
4101 /* For each frame attempted in aggregation,
4102 * update driver's record of tx frame's status. */
4103 for (i = 0; i < agg->frame_count ; i++) {
4104 ack = bitmap & (1 << i);
4106 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4107 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4108 agg->start_idx + i);
4111 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4112 tx_status->flags = IEEE80211_TX_STATUS_ACK;
4113 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4114 tx_status->ampdu_ack_map = successes;
4115 tx_status->ampdu_ack_len = agg->frame_count;
4116 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4117 &tx_status->control);
4119 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", bitmap);
4125 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4127 static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv,
4130 /* Simply stop the queue, but don't change any configuration;
4131 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4132 iwl4965_write_prph(priv,
4133 KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4134 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4135 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4139 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4141 static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4142 u16 ssn_idx, u8 tx_fifo)
4144 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4145 IWL_WARNING("queue number too small: %d, must be > %d\n",
4146 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4150 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4152 iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4154 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4155 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4156 /* supposes that ssn_idx is valid (!= 0xFFF) */
4157 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4159 iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4160 iwl4965_txq_ctx_deactivate(priv, txq_id);
4161 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4166 int iwl4965_check_empty_hw_queue(struct iwl4965_priv *priv, int sta_id,
4169 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4170 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4171 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4173 switch (priv->stations[sta_id].tid[tid].agg.state) {
4174 case IWL_EMPTYING_HW_QUEUE_DELBA:
4175 /* We are reclaiming the last packet of the */
4176 /* aggregated HW queue */
4177 if (txq_id == tid_data->agg.txq_id &&
4178 q->read_ptr == q->write_ptr) {
4179 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4180 int tx_fifo = default_tid_to_tx_fifo[tid];
4181 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4182 iwl4965_tx_queue_agg_disable(priv, txq_id,
4184 tid_data->agg.state = IWL_AGG_OFF;
4185 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4188 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4189 /* We are reclaiming the last packet of the queue */
4190 if (tid_data->tfds_in_queue == 0) {
4191 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4192 tid_data->agg.state = IWL_AGG_ON;
4193 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4201 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4202 * @index -- current index
4203 * @n_bd -- total number of entries in queue (s/b power of 2)
4205 static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4207 return (index == 0) ? n_bd - 1 : index - 1;
4211 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4213 * Handles block-acknowledge notification from device, which reports success
4214 * of frames sent via aggregation.
4216 static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4217 struct iwl4965_rx_mem_buffer *rxb)
4219 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4220 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
4222 struct iwl4965_tx_queue *txq = NULL;
4223 struct iwl4965_ht_agg *agg;
4224 DECLARE_MAC_BUF(mac);
4226 /* "flow" corresponds to Tx queue */
4227 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4229 /* "ssn" is start of block-ack Tx window, corresponds to index
4230 * (in Tx queue's circular buffer) of first TFD/frame in window */
4231 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4233 if (scd_flow >= ARRAY_SIZE(priv->txq)) {
4234 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4238 txq = &priv->txq[scd_flow];
4239 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4241 /* Find index just before block-ack window */
4242 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4244 /* TODO: Need to get this copy more safely - now good for debug */
4246 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4249 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
4251 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
4252 "%d, scd_ssn = %d\n",
4258 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
4262 /* Update driver's record of ACK vs. not for each frame in window */
4263 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
4265 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4266 * block-ack window (we assume that they've been successfully
4267 * transmitted ... if not, it's too late anyway). */
4268 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4269 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4270 priv->stations[ba_resp->sta_id].
4271 tid[ba_resp->tid].tfds_in_queue -= freed;
4272 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4273 priv->mac80211_registered &&
4274 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4275 ieee80211_wake_queue(priv->hw, scd_flow);
4276 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4277 ba_resp->tid, scd_flow);
4282 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
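* Each 32-bit word of the scheduler's translate table holds the RA/TID
* mapping for two Tx queues; odd-numbered queues occupy the upper 16 bits.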
4284 static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
4291 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4293 tbl_dw_addr = priv->scd_base_addr +
4294 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4296 tbl_dw = iwl4965_read_targ_mem(priv, tbl_dw_addr);
4298 if (txq_id & 0x1)
4299 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4300 else
4301 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4303 iwl4965_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
4310 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4312 * NOTE: txq_id must be greater than or equal to IWL_BACK_QUEUE_FIRST_ID,
4313 * i.e. it must be one of the higher queues used for aggregation
4315 static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4316 int tx_fifo, int sta_id, int tid,
4317 u16 ssn_idx)
4319 unsigned long flags;
4323 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
4324 IWL_WARNING("queue number too small: %d, must be >= %d\n",
4325 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4327 ra_tid = BUILD_RAxTID(sta_id, tid);
4329 /* Modify device's station table to Tx this TID */
4330 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
4332 spin_lock_irqsave(&priv->lock, flags);
4333 rc = iwl4965_grab_nic_access(priv);
4335 spin_unlock_irqrestore(&priv->lock, flags);
4339 /* Stop this Tx queue before configuring it */
4340 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4342 /* Map receiver-address / traffic-ID to this queue */
4343 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4345 /* Set this queue as a chain-building queue */
4346 iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4348 /* Place first TFD at index corresponding to start sequence number.
4349 * Assumes that ssn_idx is valid (!= 0xFFF) */
4350 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4351 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4352 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4354 /* Set up Tx window size and frame limit for this queue */
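/* (SCD_WIN_SIZE and SCD_FRAME_LIMIT bound the scheduler's block-ack
 * aggregation window for this queue.) */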
4355 iwl4965_write_targ_mem(priv,
4356 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4357 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4358 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4360 iwl4965_write_targ_mem(priv, priv->scd_base_addr +
4361 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4362 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4363 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4365 iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4367 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
4368 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4370 iwl4965_release_nic_access(priv);
4371 spin_unlock_irqrestore(&priv->lock, flags);
4376 #endif /* CONFIG_IWL4965_HT */
4379 * iwl4965_add_station - Initialize a station's hardware rate table
4381 * The uCode's station table contains a table of fallback rates
4382 * for automatic fallback during transmission.
4384 * NOTE: This sets up a default set of values. These will be replaced later
4385 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4388 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4389 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4390 * which requires station table entry to exist).
4392 void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4395 struct iwl4965_link_quality_cmd link_cmd = {
4400 /* Set up the rate scaling to start at selected rate, fall back
4401 * all the way down to 1M in IEEE order, and then spin on 1M */
4402 if (is_ap)
4403 r = IWL_RATE_54M_INDEX;
4404 else if (priv->band == IEEE80211_BAND_5GHZ)
4405 r = IWL_RATE_6M_INDEX;
4406 else
4407 r = IWL_RATE_1M_INDEX;
4409 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4411 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4412 rate_flags |= RATE_MCS_CCK_MSK;
4414 /* Use Tx antenna B only */
4415 rate_flags |= RATE_MCS_ANT_B_MSK;
4416 rate_flags &= ~RATE_MCS_ANT_A_MSK;
4418 link_cmd.rs_table[i].rate_n_flags =
4419 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4420 r = iwl4965_get_prev_ieee_rate(r);
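/* Antenna masks: 2 selects antenna B only (matching the Tx antenna B
 * choice above for single-stream); 3 selects both antennas for dual-stream. */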
4423 link_cmd.general_params.single_stream_ant_msk = 2;
4424 link_cmd.general_params.dual_stream_ant_msk = 3;
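/* agg_dis_start_th and agg_time_limit tune when the uCode stops
 * aggregating; the time limit is believed to be in microseconds. */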
4425 link_cmd.agg_params.agg_dis_start_th = 3;
4426 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4428 /* Update the rate scaling for control frame Tx to AP */
4429 link_cmd.sta_id = is_ap ? IWL_AP_ID : IWL4965_BROADCAST_ID;
4431 iwl4965_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd),
4435 #ifdef CONFIG_IWL4965_HT
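/*
 * iwl4965_is_channel_extension - Can this channel use the requested FAT
 * (40 MHz) extension channel offset?
 */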
4437 static u8 iwl4965_is_channel_extension(struct iwl4965_priv *priv,
4438 enum ieee80211_band band,
4439 u16 channel, u8 extension_chan_offset)
4441 const struct iwl4965_channel_info *ch_info;
4443 ch_info = iwl4965_get_channel_info(priv, band, channel);
4444 if (!is_channel_valid(ch_info))
4447 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
4450 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4451 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4457 static u8 iwl4965_is_fat_tx_allowed(struct iwl4965_priv *priv,
4458 struct ieee80211_ht_info *sta_ht_inf)
4460 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
4462 if ((!iwl_ht_conf->is_ht) ||
4463 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
4464 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
4468 if ((!sta_ht_inf->ht_supported) ||
4469 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
4473 return (iwl4965_is_channel_extension(priv, priv->band,
4474 iwl_ht_conf->control_channel,
4475 iwl_ht_conf->extension_chan_offset));
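/*
 * iwl4965_set_rxon_ht - Fold the current HT configuration into the staging
 * RXON command: channel mode (20 vs. 20/40 MHz), control channel location,
 * and HT protection mode.
 */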
4478 void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct iwl_ht_info *ht_info)
4480 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
4483 if (!ht_info->is_ht)
4486 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
4487 if (iwl4965_is_fat_tx_allowed(priv, NULL))
4488 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4490 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4491 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4493 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4494 IWL_DEBUG_ASSOC("current channel %d differs from HT control channel %d\n",
4495 le16_to_cpu(rxon->channel),
4496 ht_info->control_channel);
4497 rxon->channel = cpu_to_le16(ht_info->control_channel);
4501 /* Note: control channel is opposite of extension channel */
4502 switch (ht_info->extension_chan_offset) {
4503 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4504 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4505 break;
4506 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4507 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4508 break;
4509 case IWL_EXT_CHANNEL_OFFSET_NONE:
4511 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4515 val = ht_info->ht_protection;
4517 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4519 iwl4965_set_rxon_chain(priv);
4521 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4522 "rxon flags 0x%X operation mode :0x%X "
4523 "extension channel offset 0x%x "
4524 "control chan %d\n",
4525 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4526 le32_to_cpu(rxon->flags), ht_info->ht_protection,
4527 ht_info->extension_chan_offset,
4528 ht_info->control_channel);
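/*
 * iwl4965_set_ht_add_station - Fill in HT-related station flags (MIMO
 * power-save mode, max A-MPDU size and density, FAT allowance) for the
 * driver's station table entry at @index.
 */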
4532 void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
4533 struct ieee80211_ht_info *sta_ht_inf)
4538 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
4541 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4543 sta_flags = priv->stations[index].sta.station_flags;
4545 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4547 switch (mimo_ps_mode) {
4548 case WLAN_HT_CAP_MIMO_PS_STATIC:
4549 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4551 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
4552 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
4554 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4557 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
4561 sta_flags |= cpu_to_le32(
4562 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
4564 sta_flags |= cpu_to_le32(
4565 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
4567 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
4568 sta_flags |= STA_FLG_FAT_EN_MSK;
4570 sta_flags &= ~STA_FLG_FAT_EN_MSK;
4572 priv->stations[index].sta.station_flags = sta_flags;
4577 static void iwl4965_sta_modify_add_ba_tid(struct iwl4965_priv *priv,
4578 int sta_id, int tid, u16 ssn)
4580 unsigned long flags;
4582 spin_lock_irqsave(&priv->sta_lock, flags);
4583 priv->stations[sta_id].sta.station_flags_msk = 0;
4584 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
4585 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
4586 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
4587 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4588 spin_unlock_irqrestore(&priv->sta_lock, flags);
4590 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4593 static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv,
4594 int sta_id, int tid)
4596 unsigned long flags;
4598 spin_lock_irqsave(&priv->sta_lock, flags);
4599 priv->stations[sta_id].sta.station_flags_msk = 0;
4600 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
4601 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
4602 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4603 spin_unlock_irqrestore(&priv->sta_lock, flags);
4605 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4609 * Find first available (lowest unused) Tx Queue, mark it "active".
4610 * Called only when finding queue for aggregation.
4611 * Should never return anything < 7, because they should already
4612 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4614 static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv)
4618 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
4619 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
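/*
 * iwl4965_mac_ht_tx_agg_start - mac80211 entry point for starting Tx
 * aggregation (ADDBA): find the station, grab a free aggregation queue,
 * record the starting sequence number, and enable the queue.  The ADDBA
 * callback is deferred if frames are still pending on the legacy queue.
 */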
4624 static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4625 u16 tid, u16 *start_seq_num)
4627 struct iwl4965_priv *priv = hw->priv;
4633 unsigned long flags;
4634 struct iwl4965_tid_data *tid_data;
4635 DECLARE_MAC_BUF(mac);
4637 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4638 tx_fifo = default_tid_to_tx_fifo[tid];
4642 IWL_WARNING("%s on da = %s tid = %d\n",
4643 __func__, print_mac(mac, da), tid);
4645 sta_id = iwl4965_hw_find_station(priv, da);
4646 if (sta_id == IWL_INVALID_STATION)
4649 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
4650 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
4654 txq_id = iwl4965_txq_ctx_activate_free(priv);
4658 spin_lock_irqsave(&priv->sta_lock, flags);
4659 tid_data = &priv->stations[sta_id].tid[tid];
4660 ssn = SEQ_TO_SN(tid_data->seq_number);
4661 tid_data->agg.txq_id = txq_id;
4662 spin_unlock_irqrestore(&priv->sta_lock, flags);
4664 *start_seq_num = ssn;
4665 rc = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4671 if (tid_data->tfds_in_queue == 0) {
4672 printk(KERN_ERR "HW queue is empty\n");
4673 tid_data->agg.state = IWL_AGG_ON;
4674 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4676 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4677 tid_data->tfds_in_queue);
4678 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
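/*
 * iwl4965_mac_ht_tx_agg_stop - mac80211 entry point for stopping Tx
 * aggregation (DELBA).  If the aggregation queue is not yet empty, the
 * actual queue teardown is deferred to iwl4965_check_empty_hw_queue().
 */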
4683 static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
4687 struct iwl4965_priv *priv = hw->priv;
4688 int tx_fifo_id, txq_id, sta_id, ssn = -1;
4689 struct iwl4965_tid_data *tid_data;
4690 int rc, write_ptr, read_ptr;
4691 unsigned long flags;
4692 DECLARE_MAC_BUF(mac);
4695 IWL_ERROR("da = NULL\n");
4699 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4700 tx_fifo_id = default_tid_to_tx_fifo[tid];
4704 sta_id = iwl4965_hw_find_station(priv, da);
4706 if (sta_id == IWL_INVALID_STATION)
4709 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4710 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4712 tid_data = &priv->stations[sta_id].tid[tid];
4713 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4714 txq_id = tid_data->agg.txq_id;
4715 write_ptr = priv->txq[txq_id].q.write_ptr;
4716 read_ptr = priv->txq[txq_id].q.read_ptr;
4718 /* The queue is not empty */
4719 if (write_ptr != read_ptr) {
4720 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4721 priv->stations[sta_id].tid[tid].agg.state =
4722 IWL_EMPTYING_HW_QUEUE_DELBA;
4726 IWL_DEBUG_HT("HW queue empty\n");
4727 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
4729 spin_lock_irqsave(&priv->lock, flags);
4730 rc = iwl4965_grab_nic_access(priv);
4732 spin_unlock_irqrestore(&priv->lock, flags);
4735 rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
4736 iwl4965_release_nic_access(priv);
4737 spin_unlock_irqrestore(&priv->lock, flags);
4742 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
4744 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
4745 print_mac(mac, da), tid);
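/*
 * iwl4965_mac_ampdu_action - Single mac80211 callback dispatching all
 * A-MPDU (Rx/Tx aggregation) start and stop requests.
 */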
4750 int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4751 enum ieee80211_ampdu_mlme_action action,
4752 const u8 *addr, u16 tid, u16 *ssn)
4754 struct iwl4965_priv *priv = hw->priv;
4756 DECLARE_MAC_BUF(mac);
4758 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ",
4759 print_mac(mac, addr), tid);
4760 sta_id = iwl4965_hw_find_station(priv, addr);
4761 switch (action) {
4762 case IEEE80211_AMPDU_RX_START:
4763 IWL_DEBUG_HT("start Rx\n");
4764 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn);
4765 break;
4766 case IEEE80211_AMPDU_RX_STOP:
4767 IWL_DEBUG_HT("stop Rx\n");
4768 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4769 break;
4770 case IEEE80211_AMPDU_TX_START:
4771 IWL_DEBUG_HT("start Tx\n");
4772 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn);
4773 case IEEE80211_AMPDU_TX_STOP:
4774 IWL_DEBUG_HT("stop Tx\n");
4775 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4777 IWL_DEBUG_HT("unknown\n");
4784 #endif /* CONFIG_IWL4965_HT */
4786 /* Set up 4965-specific Rx frame reply handlers */
4787 void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv)
4789 /* Legacy Rx frames */
4790 priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx;
4792 /* High-throughput (HT) Rx frames */
4793 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
4794 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
4796 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
4797 iwl4965_rx_missed_beacon_notif;
4799 #ifdef CONFIG_IWL4965_HT
4800 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4801 #endif /* CONFIG_IWL4965_HT */
4804 void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv)
4806 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4807 INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
4808 #ifdef CONFIG_IWL4965_SENSITIVITY
4809 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4810 #endif /* CONFIG_IWL4965_SENSITIVITY */
4811 init_timer(&priv->statistics_periodic);
4812 priv->statistics_periodic.data = (unsigned long)priv;
4813 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4816 void iwl4965_hw_cancel_deferred_work(struct iwl4965_priv *priv)
4818 del_timer_sync(&priv->statistics_periodic);
4820 cancel_delayed_work(&priv->init_alive_start);
4823 struct pci_device_id iwl4965_hw_card_ids[] = {
4824 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4229)},
4825 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4230)},
4830 * The device's EEPROM semaphore prevents conflicts between driver and uCode
4831 * when accessing the EEPROM; each access is a series of pulses to/from the
4832 * EEPROM chip, not a single event, so even reads could conflict if they
4833 * weren't arbitrated by the semaphore.
4835 int iwl4965_eeprom_acquire_semaphore(struct iwl4965_priv *priv)
4840 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
4841 /* Request semaphore */
4842 iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG,
4843 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
4845 /* See if we got it */
4846 rc = iwl4965_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
4847 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4848 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
4849 EEPROM_SEM_TIMEOUT);
4851 IWL_DEBUG_IO("Acquired semaphore after %d tries.\n",
4860 MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);