F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
S: Maintained
F: net/ipv4/
F: net/ipv6/
F: include/net/ip*
+F: arch/x86/net/*
NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK)
F: drivers/ssb/
F: include/linux/ssb/
+ BROADCOM SPECIFIC AMBA DRIVER (BCMA)
+ S: Maintained
+ F: drivers/bcma/
+ F: include/linux/bcma/
+
SONY VAIO CONTROL DEVICE DRIVER
F: drivers/usb/host/uhci*
USB "USBNET" DRIVER FRAMEWORK
W: http://www.linux-usb.org/usbnet
S: Maintained
S: Maintained
F: drivers/platform/x86
+XEN NETWORK BACKEND DRIVER
+S: Supported
+F: drivers/net/xen-netback/*
+
XEN PCI SUBSYSTEM
#define FUDGE AR5K_TUNE_SW_BEACON_RESP + 3
/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
- * Since we later substract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
+ * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
* configuration we need to make sure it is bigger than that. */
if (bc_tsf == -1) {
intval |= AR5K_BEACON_RESET_TSF;
} else if (bc_tsf > hw_tsf) {
/*
- * beacon received, SW merge happend but HW TSF not yet updated.
+ * beacon received, SW merge happened but HW TSF not yet updated.
* not possible to reconfigure timers yet, but next time we
* receive a beacon with the same BSSID, the hardware will
* automatically update the TSF and then we need to reconfigure
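A standalone sketch of the TBTT arithmetic this comment describes, with hypothetical values (not part of the patch; FUDGE is parenthesized here for macro safety):

#include <stdio.h>

#define SW_BEACON_RESP	10			/* AR5K_TUNE_SW_BEACON_RESP */
#define FUDGE		(SW_BEACON_RESP + 3)	/* 13 TU, > the 10 subtracted later */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int hw_tu = 12345;	/* hypothetical current TU */
	unsigned int intval = 100;	/* hypothetical beacon interval in TU */

	/* next interval boundary at least FUDGE TUs ahead of the current TU */
	unsigned int nexttbtt = ROUNDUP(hw_tu + FUDGE, intval);

	printf("next TBTT at TU %u\n", nexttbtt);	/* prints 12400 */
	return 0;
}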
spin_lock_init(&sc->rxbuflock);
spin_lock_init(&sc->txbuflock);
spin_lock_init(&sc->block);
-
+ spin_lock_init(&sc->irqlock);
/* Setup interrupt handler */
ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
synchronize_irq(sc->irq);
stop_tasklets(sc);
- /* Save ani mode and disable ANI durring
+ /* Save ani mode and disable ANI during
* reset. If we don't we might get false
* PHY error interrupts. */
ani_mode = ah->ah_sc->ani_state.ani_mode;
.regDmn = { LE16(0), LE16(0x1f) },
.txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
.opCapFlags = {
- .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .opFlags = AR5416_OPFLAGS_11A,
.eepMisc = 0,
},
.rfSilent = 0,
.db_stage2 = {3, 3, 3}, /* 3 chain */
.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
.db_stage4 = {3, 3, 3}, /* don't exist for 2G */
- .xpaBiasLvl = 0,
+ .xpaBiasLvl = 0xf,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
.txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
eep = ar9003_eeprom_struct_find_by_id(reference);
if (eep == NULL) {
ath_dbg(common, ATH_DBG_EEPROM,
- "cant find reference eeprom struct %d\n",
+ "can't find reference eeprom struct %d\n",
reference);
return -1;
}
{
int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
else {
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
- if (!AR_SREV_9340(ah)) {
- REG_RMW_FIELD(ah, AR_CH0_THERM,
- AR_CH0_THERM_XPABIASLVL_MSB,
- bias >> 2);
- REG_RMW_FIELD(ah, AR_CH0_THERM,
- AR_CH0_THERM_XPASHORT2GND, 1);
- }
+ REG_RMW_FIELD(ah, AR_CH0_THERM,
+ AR_CH0_THERM_XPABIASLVL_MSB,
+ bias >> 2);
+ REG_RMW_FIELD(ah, AR_CH0_THERM,
+ AR_CH0_THERM_XPASHORT2GND, 1);
}
}
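The bias >> 2 in the fallback path above implies the level is split across the two registers. A minimal standalone sketch of that split (treating the AR_CH0_TOP field as two bits wide is an assumption here, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int bias = 0xf;	/* e.g. the eeprom template value */
	unsigned int top  = bias & 0x3;	/* assumed 2-bit AR_CH0_TOP field */
	unsigned int msb  = bias >> 2;	/* as written to XPABIASLVL_MSB */

	printf("top=0x%x msb=0x%x\n", top, msb);	/* top=0x3 msb=0x3 */
	return 0;
}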
static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
int chain;
+ u32 regval;
+ u32 ant_div_ctl1;
static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
AR_PHY_SWITCH_CHAIN_0,
AR_PHY_SWITCH_CHAIN_1,
if (AR_SREV_9485(ah)) {
value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
- REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_CTRL_ALL,
- value);
- REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE,
- value >> 6);
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE,
- value >> 7);
+ /*
+ * main_lnaconf, alt_lnaconf, main_tb, alt_tb
+ * are the fields present
+ */
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= (~AR_ANT_DIV_CTRL_ALL);
+ regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
+ /* enable_lnadiv */
+ regval &= (~AR_PHY_9485_ANT_DIV_LNADIV);
+ regval |= ((value >> 6) & 0x1) <<
+ AR_PHY_9485_ANT_DIV_LNADIV_S;
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+ /*enable fast_div */
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= (~AR_FAST_DIV_ENABLE);
+ regval |= ((value >> 7) & 0x1) <<
+ AR_FAST_DIV_ENABLE_S;
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+ ant_div_ctl1 =
+ ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+ /* check whether antenna diversity is enabled */
+ if ((ant_div_ctl1 >> 0x6) == 0x3) {
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ /*
+ * clear bits 25-30 main_lnaconf, alt_lnaconf,
+ * main_tb, alt_tb
+ */
+ regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_9485_ANT_DIV_ALT_LNACONF |
+ AR_PHY_9485_ANT_DIV_ALT_GAINTB |
+ AR_PHY_9485_ANT_DIV_MAIN_GAINTB));
+ /* by default use LNA1 for the main antenna */
+ regval |= (AR_PHY_9485_ANT_DIV_LNA1 <<
+ AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (AR_PHY_9485_ANT_DIV_LNA2 <<
+ AR_PHY_9485_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
+
}
}
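The AR9485 branch above consumes a packed EEP_ANT_DIV_CTL1 byte. A standalone sketch of the decoding, with the field meanings taken from the comments in the patch (the sample value is hypothetical):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned int ctl1 = 0xce;	/* hypothetical EEPROM value */

	unsigned int div_ctrl = ctl1 & 0x3f;	/* bits 0-5: antenna div control */
	bool lna_div_en  = (ctl1 >> 6) & 0x1;	/* bit 6: enable_lnadiv */
	bool fast_div_en = (ctl1 >> 7) & 0x1;	/* bit 7: enable fast_div */
	/* "(ant_div_ctl1 >> 0x6) == 0x3" in the patch means both bits are set */
	bool diversity   = ((ctl1 >> 6) == 0x3);

	printf("ctrl=0x%x lnadiv=%d fastdiv=%d diversity=%d\n",
	       div_ctrl, lna_div_en, fast_div_en, diversity);
	return 0;
}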
static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
);
+ /* Write the power for duplicated frames - HT40 */
+
+ /* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
+ REG_WRITE(ah, 0xa3e0,
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
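The write above packs four per-rate powers into one register word. A standalone sketch, assuming ath9k's usual (((_r) & 0x3f) << (_s)) definition of POW_SM:

#include <stdio.h>
#include <stdint.h>

#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))

int main(void)
{
	uint8_t pwr_6_24 = 0x12, pwr_1l_5l = 0x10;	/* hypothetical values */

	/* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
	uint32_t word = POW_SM(pwr_6_24, 24) | POW_SM(pwr_1l_5l, 16) |
			POW_SM(pwr_6_24,  8) | POW_SM(pwr_1l_5l,  0);

	printf("0x%08x\n", word);	/* prints 0x12101210 */
	return 0;
}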
/* Write the HT20 power per rate set */
/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
(u32)ATH_AMPDU_LIMIT_MAX);
/*
- * h/w can accept aggregates upto 16 bit lengths (65535).
- * The IE, however can hold upto 65536, which shows up here
+ * h/w can accept aggregates up to 16 bit lengths (65535).
+ * The IE, however, can hold up to 65536, which shows up here
* as zero. Ignore 65536 since we are constrained by hw.
*/
if (tid->an->maxampdu)
rix = rates[i].idx;
series[i].Tries = rates[i].count;
- if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
- (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
+ if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
flags |= ATH9K_TXDESC_RTSENA;
} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
!is_pspoll, ctsrate,
0, series, 4, flags);
- if (sc->config.ath_aggr_prot && flags)
- ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}
static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->control.sta;
+ struct ieee80211_vif *vif = info->control.vif;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_buf *bf;
memmove(skb->data, skb->data + padsize, padpos);
}
+ if ((vif && vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_AP_VLAN) ||
+ !ieee80211_is_data(hdr->frame_control))
+ info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
setup_frame_info(hw, skb, frmlen);
/*
MODULE_FIRMWARE("b43/ucode13.fw");
MODULE_FIRMWARE("b43/ucode14.fw");
MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode16_mimo.fw");
MODULE_FIRMWARE("b43/ucode5.fw");
MODULE_FIRMWARE("b43/ucode9.fw");
dev->mac_suspended++;
}
+ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
+ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
+ {
+ u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
+ if (on)
+ tmslow |= B43_TMSLOW_MACPHYCLKEN;
+ else
+ tmslow &= ~B43_TMSLOW_MACPHYCLKEN;
+ ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
+ }
+
static void b43_adjust_opmode(struct b43_wldev *dev)
{
struct b43_wl *wl = dev->wl;
{
struct b43_phy *phy = &dev->phy;
int err;
- u32 value32, macctl;
+ u32 macctl;
u16 value16;
/* Initialize the MAC control */
b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
- value32 = ssb_read32(dev->dev, SSB_TMSLOW);
- value32 |= 0x00100000;
- ssb_write32(dev->dev, SSB_TMSLOW, value32);
+ b43_mac_phy_clock_set(dev, true);
b43_write16(dev, B43_MMIO_POWERUP_DELAY,
dev->dev->bus->chipco.fast_pwrup_delay);
b43_mac_enable(dev);
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
- /* Start maintainance work */
+ /* Start maintenance work */
b43_periodic_tasks_setup(dev);
b43_leds_init(dev);
static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
{
- #ifdef CONFIG_SSB_DRIVER_PCICORE
struct ssb_bus *bus = dev->dev->bus;
u32 tmp;
- if (bus->pcicore.dev &&
- bus->pcicore.dev->id.coreid == SSB_DEV_PCI &&
- bus->pcicore.dev->id.revision <= 5) {
- /* IMCFGLO timeouts workaround. */
+ if ((bus->chip_id == 0x4311 && bus->chip_rev == 2) ||
+ (bus->chip_id == 0x4312)) {
tmp = ssb_read32(dev->dev, SSB_IMCFGLO);
- switch (bus->bustype) {
- case SSB_BUSTYPE_PCI:
- case SSB_BUSTYPE_PCMCIA:
- tmp &= ~SSB_IMCFGLO_REQTO;
- tmp &= ~SSB_IMCFGLO_SERTO;
- tmp |= 0x32;
- break;
- case SSB_BUSTYPE_SSB:
- tmp &= ~SSB_IMCFGLO_REQTO;
- tmp &= ~SSB_IMCFGLO_SERTO;
- tmp |= 0x53;
- break;
- default:
- break;
- }
+ tmp &= ~SSB_IMCFGLO_REQTO;
+ tmp &= ~SSB_IMCFGLO_SERTO;
+ tmp |= 0x3;
ssb_write32(dev->dev, SSB_IMCFGLO, tmp);
+ ssb_commit_settings(bus);
}
- #endif /* CONFIG_SSB_DRIVER_PCICORE */
}
static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
{
struct b43_wldev *wldev;
- struct pci_dev *pdev;
int err = -ENOMEM;
- if (!list_empty(&wl->devlist)) {
- /* We are not the first core on this chip. */
- pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
- /* Only special chips support more than one wireless
- * core, although some of the other chips have more than
- * one wireless core as well. Check for this and
- * bail out early.
- */
- if (!pdev ||
- ((pdev->device != 0x4321) &&
- (pdev->device != 0x4313) && (pdev->device != 0x431A))) {
- b43dbg(wl, "Ignoring unconnected 802.11 core\n");
- return -ENODEV;
- }
- }
-
wldev = kzalloc(sizeof(*wldev), GFP_KERNEL);
if (!wldev)
goto out;
return err;
}
- static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id)
+ static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id)
{
struct b43_wl *wl;
int err;
return err;
}
- static void b43_remove(struct ssb_device *dev)
+ static void b43_ssb_remove(struct ssb_device *dev)
{
struct b43_wl *wl = ssb_get_devtypedata(dev);
struct b43_wldev *wldev = ssb_get_drvdata(dev);
static struct ssb_driver b43_ssb_driver = {
.name = KBUILD_MODNAME,
.id_table = b43_ssb_tbl,
- .probe = b43_probe,
- .remove = b43_remove,
+ .probe = b43_ssb_probe,
+ .remove = b43_ssb_remove,
};
static void b43_print_driverinfo(void)
/**
* iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
- void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
+ static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
int write_ptr = txq->q.write_ptr;
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != priv->cmd_queue) {
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
- break;
- }
+ sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+ sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += WEP_IV_LEN + WEP_ICV_LEN;
+ break;
}
bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
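Each entry written above packs two fields into 16 bits. A standalone sketch of the layout (CCMP_MIC_LEN is 8 in this driver; the sample length is hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t len = 1536 + 8;	/* hypothetical frame len + CCMP_MIC_LEN */
	uint8_t sta_id = 5;

	/* low 12 bits: adjusted byte count, high 4 bits: station id */
	uint16_t bc_ent = (len & 0xFFF) | (sta_id << 12);

	printf("bc_ent=0x%04x len=%u sta=%u\n",
	       bc_ent, bc_ent & 0xFFF, bc_ent >> 12);	/* 0x5608 1544 5 */
	return 0;
}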
- void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
+ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
int txq_id = txq->q.id;
struct iwl_tx_cmd *tx_cmd;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int txq_id;
- dma_addr_t phys_addr;
+ dma_addr_t phys_addr = 0;
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
u16 len, firstlen, secondlen;
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
+ goto drop_unlock_priv;
}
fc = hdr->frame_control;
hdr_len = ieee80211_hdrlen(fc);
- /* Find index into station table for destination station */
- sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock_priv;
+ /* For management frames use broadcast id so as not to break aggregation */
+ if (!ieee80211_is_data(fc))
+ sta_id = ctx->bcast_sta_id;
+ else {
+ /* Find index into station table for destination station */
+ sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
- goto drop_unlock;
+ goto drop_unlock_priv;
+ }
}
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
+
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
+ goto drop_unlock_sta;
+
seq_number = priv->stations[sta_id].tid[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl = hdr->seq_ctrl &
txq = &priv->txq[txq_id];
q = &txq->q;
- if (unlikely(iwl_queue_space(q) < q->high_mark)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
-
- spin_unlock(&priv->sta_lock);
+ if (unlikely(iwl_queue_space(q) < q->high_mark))
+ goto drop_unlock_sta;
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
txcmd_phys = pci_map_single(priv->pci_dev,
&out_cmd->hdr, firstlen,
PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys)))
+ goto drop_unlock_sta;
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, firstlen, 1, 0);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (secondlen > 0) {
phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
secondlen, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ PCI_DMA_BIDIRECTIONAL);
+ goto drop_unlock_sta;
+ }
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&priv->sta_lock);
+
+ /* Attach buffers to TFD */
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, firstlen, 1, 0);
+ if (secondlen > 0)
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, secondlen,
0, 0);
- }
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
/* Set up entry for this TFD in Tx byte-count array */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
+ iwlagn_txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
firstlen, PCI_DMA_BIDIRECTIONAL);
return 0;
- drop_unlock:
+ drop_unlock_sta:
+ spin_unlock(&priv->sta_lock);
+ drop_unlock_priv:
spin_unlock_irqrestore(&priv->lock, flags);
return -1;
}
txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
tx_info->skb = NULL;
- if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
- priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+ iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
}
*
*/
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
sme->ssid, sme->ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
if (!bss) {
- lbs_pr_err("assoc: bss %pM not in scan results\n",
- sme->bssid);
+ wiphy_err(wiphy, "assoc: bss %pM not in scan results\n",
+ sme->bssid);
ret = -ENOENT;
goto done;
}
* we remove all keys like in the WPA/WPA2 setup,
* we just don't set RSN.
*
- * Therefore: fall-throught
+ * Therefore: fall-through
*/
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
lbs_enable_rsn(priv, sme->crypto.cipher_group != 0);
break;
default:
- lbs_pr_err("unsupported cipher group 0x%x\n",
- sme->crypto.cipher_group);
+ wiphy_err(wiphy, "unsupported cipher group 0x%x\n",
+ sme->crypto.cipher_group);
ret = -ENOTSUPP;
goto done;
}
params->key, params->key_len);
break;
default:
- lbs_pr_err("unhandled cipher 0x%x\n", params->cipher);
+ wiphy_err(wiphy, "unhandled cipher 0x%x\n", params->cipher);
ret = -ENOTSUPP;
break;
}
ret = wiphy_register(wdev->wiphy);
if (ret < 0)
- lbs_pr_err("cannot register wiphy device\n");
+ pr_err("cannot register wiphy device\n");
priv->wiphy_registered = true;
ret = register_netdev(priv->dev);
if (ret)
- lbs_pr_err("cannot register network device\n");
+ pr_err("cannot register network device\n");
INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
adapter = kzalloc(sizeof(struct mwifiex_adapter), GFP_KERNEL);
if (!adapter)
- return -1;
+ return -ENOMEM;
g_adapter = adapter;
adapter->card = card;
*/
static int mwifiex_unregister(struct mwifiex_adapter *adapter)
{
- s32 i = 0;
+ s32 i;
del_timer(&adapter->cmd_timer);
*/
static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
{
- int ret = 0;
- int err;
+ int ret, err;
struct mwifiex_fw_image fw;
memset(&fw, 0, sizeof(struct mwifiex_fw_image));
static void
mwifiex_fill_buffer(struct sk_buff *skb)
{
- struct ethhdr *eth = NULL;
+ struct ethhdr *eth;
struct iphdr *iph;
struct timeval tv;
u8 tid = 0;
mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct sk_buff *new_skb = NULL;
+ struct sk_buff *new_skb;
struct mwifiex_txinfo *tx_info;
dev_dbg(priv->adapter->dev, "data: %lu BSS(%d): Data <= kernel\n",
jiffies, priv->bss_index);
if (priv->adapter->surprise_removed) {
- kfree(skb);
+ kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
}
if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
- kfree(skb);
+ kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
}
skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
if (unlikely(!new_skb)) {
dev_err(priv->adapter->dev, "Tx: cannot allocate new_skb\n");
- kfree(skb);
+ kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
}
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct sockaddr *hw_addr = (struct sockaddr *) addr;
- int ret = 0;
+ int ret;
memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
struct mwifiex_adapter *adapter,
u8 bss_index, u8 bss_type)
{
- struct net_device *dev = NULL;
- struct mwifiex_private *priv = NULL;
- void *mdev_priv = NULL;
+ struct net_device *dev;
+ struct mwifiex_private *priv;
+ void *mdev_priv;
dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), "mlan%d",
ether_setup, 1);
dev_err(adapter->dev, "no memory available for netdevice\n");
goto error;
}
- if (dev_alloc_name(dev, dev->name)) {
- dev_err(adapter->dev, "unable to alloc name for netdevice\n");
- goto error;
- }
if (mwifiex_register_cfg80211(dev, adapter->priv[bss_index]->curr_addr,
adapter->priv[bss_index]) != 0) {
static void
mwifiex_remove_interface(struct mwifiex_adapter *adapter, u8 bss_index)
{
- struct net_device *dev = NULL;
+ struct net_device *dev;
struct mwifiex_private *priv = adapter->priv[bss_index];
if (!priv)
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
*/
#define RF2820 0x0001
#define RF3320 0x000b
#define RF3322 0x000c
#define RF3853 0x000d
+ #define RF5370 0x5370
#define RF5390 0x5390
/*
* READ_CONTROL: 0 write BBP, 1 read BBP
* BUSY: ASIC is busy executing BBP commands
* BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
- * BBP_RW_MODE: 0 serial, 1 paralell
+ * BBP_RW_MODE: 0 serial, 1 parallel
*/
#define BBP_CSR_CFG 0x101c
#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
if (rf->channel > 14) {
/*
* When TX power is below 0, we should increase it by 7 to
- * make it a positive value (Minumum value is -7).
+ * make it a positive value (Minimum value is -7).
* However this means that values between 0 and 7 have
* double meaning, and we should set a 7DBm boost flag.
*/
rt2x00_rf(rt2x00dev, RF3052) ||
rt2x00_rf(rt2x00dev, RF3320))
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
- else if (rt2x00_rf(rt2x00dev, RF5390))
+ else if (rt2x00_rf(rt2x00dev, RF5370) ||
+ rt2x00_rf(rt2x00dev, RF5390))
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
!rt2x00_rf(rt2x00dev, RF3022) &&
!rt2x00_rf(rt2x00dev, RF3052) &&
!rt2x00_rf(rt2x00dev, RF3320) &&
+ !rt2x00_rf(rt2x00dev, RF5370) &&
!rt2x00_rf(rt2x00dev, RF5390)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3320) ||
+ rt2x00_rf(rt2x00dev, RF5370) ||
rt2x00_rf(rt2x00dev, RF5390)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
/*
* If the transfer to hardware succeeded, it does not mean the
* frame was send out correctly. It only means the frame
- * was succesfully pushed to the hardware, we have no
+ * was successfully pushed to the hardware, we have no
* way to determine the transmission status right now.
* (Only indirectly by looking at the failed TX counters
* in the register).
if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
- return true;
+ return false;
/*
* USB devices cannot blindly pass the skb->len as the
if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
- return true;
+ return false;
rt2x00lib_dmastart(entry);
struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
- return true;
+ return false;
usb_kill_urb(entry_priv->urb);
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
ret = -ETIMEDOUT;
- goto out;
+ goto fail;
}
poll_count++;
status = le16_to_cpu(cmd->status);
if (status != CMD_STATUS_SUCCESS) {
wl1271_error("command execute failure %d", status);
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
ret = -EIO;
+ goto fail;
}
wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
WL1271_ACX_INTR_CMD_COMPLETE);
+ return 0;
- out:
+ fail:
+ WARN_ON(1);
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
return ret;
}
if (gp->tx_bip_fem_auto_detect)
answer = true;
+ /* Override the REF CLK from the NVS with the one from platform data */
+ gen_parms->general_params.ref_clock = wl->ref_clock;
+
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
if (gp->tx_bip_fem_auto_detect)
answer = true;
+ /* Replace REF and TCXO CLKs with the ones from platform data */
+ gen_parms->general_params.ref_clock = wl->ref_clock;
+ gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
+
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
- * @len: lenght of buf
+ * @len: length of buf
*/
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
{
memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
- cmd->aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
+ cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
cmd->bss_index = WL1271_AP_BSS_INDEX;
cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
CONF_SG_TEMP_PARAM_3,
CONF_SG_TEMP_PARAM_4,
CONF_SG_TEMP_PARAM_5,
- CONF_SG_PARAMS_MAX,
+
+ /*
+ * AP beacon miss
+ *
+ * Range: 0 - 255
+ */
+ CONF_SG_AP_BEACON_MISS_TX,
+
+ /*
+ * AP RX window length
+ *
+ * Range: 0 - 50
+ */
+ CONF_SG_RX_WINDOW_LENGTH,
+
+ /*
+ * AP connection protection time
+ *
+ * Range: 0 - 5000
+ */
+ CONF_SG_AP_CONNECTION_PROTECTION_TIME,
+
+ CONF_SG_TEMP_PARAM_6,
+ CONF_SG_TEMP_PARAM_7,
+ CONF_SG_TEMP_PARAM_8,
+ CONF_SG_TEMP_PARAM_9,
+ CONF_SG_TEMP_PARAM_10,
+
+ CONF_SG_STA_PARAMS_MAX = CONF_SG_TEMP_PARAM_5 + 1,
+ CONF_SG_AP_PARAMS_MAX = CONF_SG_TEMP_PARAM_10 + 1,
+
CONF_SG_PARAMS_ALL = 0xff
};
struct conf_sg_settings {
- u32 params[CONF_SG_PARAMS_MAX];
+ u32 sta_params[CONF_SG_STA_PARAMS_MAX];
+ u32 ap_params[CONF_SG_AP_PARAMS_MAX];
u8 state;
};
#define CONF_TX_RATE_RETRY_LIMIT 10
/*
- * Rates supported for data packets when operating as AP. Note the absense
+ * Rates supported for data packets when operating as AP. Note the absence
* of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
* one. The rate dropped is not mandatory under any operating mode.
*/
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
CONF_HW_BIT_RATE_54MBPS)
+ #define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \
+ CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \
+ CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
+ CONF_HW_BIT_RATE_54MBPS)
+
/*
* Default rates for management traffic when operating in AP mode. This
* should be configured according to the basic rate set of the AP
#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
+ /*
+ * Default rates for working as IBSS. Use 11b rates.
+ */
+ #define CONF_TX_IBSS_DEFAULT_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_11MBPS)
+
struct conf_tx_rate_class {
/*
CONF_TX_AC_BK = 1, /* background */
CONF_TX_AC_VI = 2, /* video */
CONF_TX_AC_VO = 3, /* voice */
- CONF_TX_AC_CTS2SELF = 4, /* fictious AC, follows AC_VO */
+ CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */
CONF_TX_AC_ANY_TID = 0x1f
};
struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
/*
- * Configuration for rate classes in AP-mode. These rate classes
- * are for the AC TX queues
- */
- struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
-
- /*
- * Management TX rate class for AP-mode.
- */
- struct conf_tx_rate_class ap_mgmt_conf;
-
- /*
- * Broadcast TX rate class for AP-mode.
- */
- struct conf_tx_rate_class ap_bcst_conf;
-
- /*
- * Allow this number of TX retries to a connected station/AP before an
+ * AP-mode - allow this number of TX retries to a station before an
* event is triggered from FW.
- * In AP-mode the hlids of unreachable stations are given in the
- * "sta_tx_retry_exceeded" member in the event mailbox.
- */
- u8 max_tx_retries;
-
- /*
- * AP-mode - after this number of seconds a connected station is
- * considered inactive.
*/
- u16 ap_aging_period;
+ u16 ap_max_tx_retries;
/*
* Configuration for TID parameters.
};
+ struct conf_sched_scan_settings {
+ /* minimum time to wait on the channel for active scans (in TUs) */
+ u16 min_dwell_time_active;
+
+ /* maximum time to wait on the channel for active scans (in TUs) */
+ u16 max_dwell_time_active;
+
+ /* time to wait on the channel for passive scans (in TUs) */
+ u32 dwell_time_passive;
+
+ /* number of probe requests to send on each channel in active scans */
+ u8 num_probe_reqs;
+
+ /* RSSI threshold to be used for filtering */
+ s8 rssi_threshold;
+
+ /* SNR threshold to be used for filtering */
+ s8 snr_threshold;
+ };
+
/* these are number of channels on the band divided by two, rounded up */
#define CONF_TX_PWR_COMPENSATION_LEN_2 7
#define CONF_TX_PWR_COMPENSATION_LEN_5 18
/*
* Minimum required free tx memory blocks in order to assure optimum
- * performence
+ * performance
*
* Range: 0-120
*/
/*
* Minimum required free rx memory blocks in order to assure optimum
- * performence
+ * performance
*
* Range: 0-120
*/
u8 tx_min;
};
+ struct conf_fm_coex {
+ u8 enable;
+ u8 swallow_period;
+ u8 n_divider_fref_set_1;
+ u8 n_divider_fref_set_2;
+ u16 m_divider_fref_set_1;
+ u16 m_divider_fref_set_2;
+ u32 coex_pll_stabilization_time;
+ u16 ldo_stabilization_time;
+ u8 fm_disturbed_band_margin;
+ u8 swallow_clk_diff;
+ };
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
struct conf_pm_config_settings pm_config;
struct conf_roam_trigger_settings roam_trigger;
struct conf_scan_settings scan;
+ struct conf_sched_scan_settings sched_scan;
struct conf_rf_settings rf;
struct conf_ht_setting ht;
struct conf_memory_settings mem_wl127x;
struct conf_memory_settings mem_wl128x;
+ struct conf_fm_coex fm_coex;
u8 hci_io_ds;
};
* below.
*/
+ /**
+ * DOC: Virtual interface / concurrency capabilities
+ *
+ * Some devices are able to operate with virtual MACs, they can have
+ * more than one virtual interface. The capability handling for this
+ * is a bit complex though, as there may be a number of restrictions
+ * on the types of concurrency that are supported.
+ *
+ * To start with, each device supports the interface types listed in
+ * the %NL80211_ATTR_SUPPORTED_IFTYPES attribute, but by listing the
+ * types there no concurrency is implied.
+ *
+ * Once concurrency is desired, more attributes must be observed:
+ * To start with, since some interface types are purely managed in
+ * software, like the AP-VLAN type in mac80211 for example, there's
+ * an additional list of these, they can be added at any time and
+ * are only restricted by some semantic restrictions (e.g. AP-VLAN
+ * cannot be added without a corresponding AP interface). This list
+ * is exported in the %NL80211_ATTR_SOFTWARE_IFTYPES attribute.
+ *
+ * Further, the list of supported combinations is exported. This is
+ * in the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute. Basically,
+ * it exports a list of "groups", and at any point in time the
+ * interfaces that are currently active must fall into any one of
+ * the advertised groups. Within each group, there are restrictions
+ * on the number of interfaces of different types that are supported
+ * and also the number of different channels, along with potentially
+ * some other restrictions. See &enum nl80211_if_combination_attrs.
+ *
+ * All together, these attributes define the concurrency of virtual
+ * interfaces that a given device supports.
+ */
+
/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
* partial scan results may be available
*
+ * @NL80211_CMD_START_SCHED_SCAN: start a scheduled scan at certain
+ * intervals, as specified by %NL80211_ATTR_SCHED_SCAN_INTERVAL.
+ * Like with normal scans, if SSIDs (%NL80211_ATTR_SCAN_SSIDS)
+ * are passed, they are used in the probe requests. For
+ * broadcast, a broadcast SSID must be passed (i.e. an empty
+ * string). If no SSID is passed, no probe requests are sent and
+ * a passive scan is performed. %NL80211_ATTR_SCAN_FREQUENCIES,
+ * if passed, define which channels should be scanned; if not
+ * passed, all channels allowed for the current regulatory domain
+ * are used. Extra IEs can also be passed from the userspace by
+ * using the %NL80211_ATTR_IE attribute.
+ * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan
+ * @NL80211_CMD_SCHED_SCAN_RESULTS: indicates that there are scheduled scan
+ * results available.
+ * @NL80211_CMD_SCHED_SCAN_STOPPED: indicates that the scheduled scan has
+ * stopped. The driver may issue this event at any time during a
+ * scheduled scan. One reason for stopping the scan is if the hardware
+ * does not support starting an association or a normal scan while running
+ * a scheduled scan. This event is also sent when the
+ * %NL80211_CMD_STOP_SCHED_SCAN command is received or when the interface
+ * is brought down while a scheduled scan was running.
+ *
* @NL80211_CMD_GET_SURVEY: get survey results, e.g. channel occupation
* or noise level
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* new station with the AUTHENTICATED flag unset and maybe change it later
* depending on the authentication result.
*
+ * @NL80211_CMD_GET_WOWLAN: get Wake-on-Wireless-LAN (WoWLAN) settings.
+ * @NL80211_CMD_SET_WOWLAN: set Wake-on-Wireless-LAN (WoWLAN) settings.
+ * Since wireless is more complex than wired ethernet, it supports
+ * various triggers. These triggers can be configured through this
+ * command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For
+ * more background information, see
+ * http://wireless.kernel.org/en/users/Documentation/WoWLAN.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
enum nl80211_commands {
-/* don't change the order or add anything inbetween, this is ABI! */
+/* don't change the order or add anything between, this is ABI! */
NL80211_CMD_UNSPEC,
NL80211_CMD_GET_WIPHY, /* can dump */
NL80211_CMD_NEW_PEER_CANDIDATE,
+ NL80211_CMD_GET_WOWLAN,
+ NL80211_CMD_SET_WOWLAN,
+
+ NL80211_CMD_START_SCHED_SCAN,
+ NL80211_CMD_STOP_SCHED_SCAN,
+ NL80211_CMD_SCHED_SCAN_RESULTS,
+ NL80211_CMD_SCHED_SCAN_STOPPED,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
* This can be used to mask out antennas which are not attached or should
* not be used for receiving. If an antenna is not selected in this bitmap
* the hardware should not be configured to receive on this antenna.
- * For a more detailed descripton see @NL80211_ATTR_WIPHY_ANTENNA_TX.
+ * For a more detailed description see @NL80211_ATTR_WIPHY_ANTENNA_TX.
*
* @NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX: Bitmap of antennas which are available
* for configuration as TX antennas via the above parameters.
* @NL80211_ATTR_SUPPORT_MESH_AUTH: Currently, this means the underlying driver
* allows auth frames in a mesh to be passed to userspace for processing via
* the @NL80211_MESH_SETUP_USERSPACE_AUTH flag.
+ * @NL80211_ATTR_STA_PLINK_STATE: The state of a mesh peer link as
+ * defined in &enum nl80211_plink_state. Used when userspace is
+ * driving the peer link management state machine.
+ * @NL80211_MESH_SETUP_USERSPACE_AMPE must be enabled.
+ *
+ * @NL80211_ATTR_WOWLAN_SUPPORTED: indicates, as part of the wiphy capabilities,
+ * the supported WoWLAN triggers
+ * @NL80211_ATTR_WOWLAN_TRIGGERS: used by %NL80211_CMD_SET_WOWLAN to
+ * indicate which WoW triggers should be enabled. This is also
+ * used by %NL80211_CMD_GET_WOWLAN to get the currently enabled WoWLAN
+ * triggers.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan
+ * cycles, in msecs.
+ *
+ * @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
+ * interface combinations. In each nested item, it contains attributes
+ * defined in &enum nl80211_if_combination_attrs.
+ * @NL80211_ATTR_SOFTWARE_IFTYPES: Nested attribute (just like
+ * %NL80211_ATTR_SUPPORTED_IFTYPES) containing the interface types that
+ * are managed in software: interfaces of these types aren't subject to
+ * any restrictions in their number or combinations.
*
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
enum nl80211_attrs {
-/* don't change the order or add anything inbetween, this is ABI! */
+/* don't change the order or add anything between, this is ABI! */
NL80211_ATTR_UNSPEC,
NL80211_ATTR_WIPHY,
NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
NL80211_ATTR_SUPPORT_MESH_AUTH,
+ NL80211_ATTR_STA_PLINK_STATE,
+
+ NL80211_ATTR_WOWLAN_TRIGGERS,
+ NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED,
+
+ NL80211_ATTR_SCHED_SCAN_INTERVAL,
+
+ NL80211_ATTR_INTERFACE_COMBINATIONS,
+ NL80211_ATTR_SOFTWARE_IFTYPES,
/* add attributes here, update the policy in nl80211.c */
* @NL80211_IFTYPE_ADHOC: independent BSS member
* @NL80211_IFTYPE_STATION: managed BSS member
* @NL80211_IFTYPE_AP: access point
- * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points
+ * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points; VLAN interfaces
+ * are a bit special in that they must always be tied to a pre-existing
+ * AP type interface.
* @NL80211_IFTYPE_WDS: wireless distribution interface
* @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames
* @NL80211_IFTYPE_MESH_POINT: mesh point
* @NL80211_STA_INFO_LLID: the station's mesh LLID
* @NL80211_STA_INFO_PLID: the station's mesh PLID
* @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
+ * (see &enum nl80211_plink_state)
* @NL80211_STA_INFO_RX_BITRATE: last unicast data frame rx rate, nested
* attribute, like NL80211_STA_INFO_TX_BITRATE.
* @NL80211_STA_INFO_BSS_PARAM: current station's view of BSS, nested attribute
* 802.11 country information element with regulatory information it
* thinks we should consider. cfg80211 only processes the country
* code from the IE, and relies on the regulatory domain information
- * structure pased by userspace (CRDA) from our wireless-regdb.
+ * structure passed by userspace (CRDA) from our wireless-regdb.
* If a channel is enabled but the country code indicates it should
* be disabled we disable the channel and re-enable it upon disassociation.
*/
* @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
* millisecond units, used by the Peer Link Open message
*
- * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the inital confirm timeout, in
+ * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
* millisecond units, used by the peer link management to close a peer link
*
* @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
* @NL80211_MESH_SETUP_USERSPACE_AUTH: Enable this option if an authentication
* daemon will be authenticating mesh candidates.
*
+ * @NL80211_MESH_SETUP_USERSPACE_AMPE: Enable this option if an authentication
+ * daemon will be securing peer link frames. AMPE is a secured version of Mesh
+ * Peering Management (MPM) and is implemented with the assistance of a
+ * userspace daemon. When this flag is set, the kernel will send peer
+ * management frames to a userspace daemon that will implement AMPE
+ * functionality (security capabilities selection, key confirmation, and key
+ * management). When the flag is unset (default), the kernel can autonomously
+ * complete (unsecured) mesh peering without the need of a userspace daemon.
+ *
* @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
* @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
*/
NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC,
NL80211_MESH_SETUP_IE,
NL80211_MESH_SETUP_USERSPACE_AUTH,
+ NL80211_MESH_SETUP_USERSPACE_AMPE,
/* keep last */
__NL80211_MESH_SETUP_ATTR_AFTER_LAST,
NL80211_TX_POWER_FIXED,
};
+ /**
+ * enum nl80211_wowlan_packet_pattern_attr - WoWLAN packet pattern attribute
+ * @__NL80211_WOWLAN_PKTPAT_INVALID: invalid number for nested attribute
+ * @NL80211_WOWLAN_PKTPAT_PATTERN: the pattern, values where the mask has
+ * a zero bit are ignored
+ * @NL80211_WOWLAN_PKTPAT_MASK: pattern mask, must be long enough to have
+ * a bit for each byte in the pattern. The lowest-order bit corresponds
+ * to the first byte of the pattern, but the bytes of the pattern are
+ * in a little-endian-like format, i.e. the 9th byte of the pattern
+ * corresponds to the lowest-order bit in the second byte of the mask.
+ * For example: The match 00:xx:00:00:xx:00:00:00:00:xx:xx:xx (where
+ * xx indicates "don't care") would be represented by a pattern of
+ * twelve zero bytes, and a mask of "0xed,0x01".
+ * Note that the pattern matching is done as though frames were not
+ * 802.11 frames but 802.3 frames, i.e. the frame is fully unpacked
+ * first (including SNAP header unpacking) and then matched.
+ * @NUM_NL80211_WOWLAN_PKTPAT: number of attributes
+ * @MAX_NL80211_WOWLAN_PKTPAT: max attribute number
+ */
+ enum nl80211_wowlan_packet_pattern_attr {
+ __NL80211_WOWLAN_PKTPAT_INVALID,
+ NL80211_WOWLAN_PKTPAT_MASK,
+ NL80211_WOWLAN_PKTPAT_PATTERN,
+
+ NUM_NL80211_WOWLAN_PKTPAT,
+ MAX_NL80211_WOWLAN_PKTPAT = NUM_NL80211_WOWLAN_PKTPAT - 1,
+ };
+
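The "0xed,0x01" example above can be recomputed mechanically. A standalone sketch using the byte-to-bit mapping described in the comment (care/don't-care layout copied from the example match):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 00:xx:00:00:xx:00:00:00:00:xx:xx:xx - xx means "don't care" */
	int care[12] = { 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0 };
	uint8_t mask[2] = { 0, 0 };	/* 12 pattern bytes -> 2 mask bytes */
	int i;

	/* bit i of the mask corresponds to pattern byte i */
	for (i = 0; i < 12; i++)
		if (care[i])
			mask[i / 8] |= 1 << (i % 8);

	printf("mask = 0x%02x,0x%02x\n", mask[0], mask[1]);	/* 0xed,0x01 */
	return 0;
}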
+ /**
+ * struct nl80211_wowlan_pattern_support - pattern support information
+ * @max_patterns: maximum number of patterns supported
+ * @min_pattern_len: minimum length of each pattern
+ * @max_pattern_len: maximum length of each pattern
+ *
+ * This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when
+ * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the
+ * capability information given by the kernel to userspace.
+ */
+ struct nl80211_wowlan_pattern_support {
+ __u32 max_patterns;
+ __u32 min_pattern_len;
+ __u32 max_pattern_len;
+ } __attribute__((packed));
+
+ /**
+ * enum nl80211_wowlan_triggers - WoWLAN trigger definitions
+ * @__NL80211_WOWLAN_TRIG_INVALID: invalid number for nested attributes
+ * @NL80211_WOWLAN_TRIG_ANY: wake up on any activity, do not really put
+ * the chip into a special state -- works best with chips that have
+ * support for low-power operation already (flag)
+ * @NL80211_WOWLAN_TRIG_DISCONNECT: wake up on disconnect, the way disconnect
+ * is detected is implementation-specific (flag)
+ * @NL80211_WOWLAN_TRIG_MAGIC_PKT: wake up on magic packet (6x 0xff, followed
+ * by 16 repetitions of MAC addr, anywhere in payload) (flag)
+ * @NL80211_WOWLAN_TRIG_PKT_PATTERN: wake up on the specified packet patterns
+ * which are passed in an array of nested attributes, each nested attribute
+ * defining a pattern with attributes from &enum nl80211_wowlan_packet_pattern_attr.
+ * Each pattern defines a wakeup packet. The matching is done on the MSDU,
+ * i.e. as though the packet was an 802.3 packet, so the pattern matching
+ * is done after the packet is converted to the MSDU.
+ *
+ * In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute
+ * carrying a &struct nl80211_wowlan_pattern_support.
+ * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers
+ * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number
+ */
+ enum nl80211_wowlan_triggers {
+ __NL80211_WOWLAN_TRIG_INVALID,
+ NL80211_WOWLAN_TRIG_ANY,
+ NL80211_WOWLAN_TRIG_DISCONNECT,
+ NL80211_WOWLAN_TRIG_MAGIC_PKT,
+ NL80211_WOWLAN_TRIG_PKT_PATTERN,
+
+ /* keep last */
+ NUM_NL80211_WOWLAN_TRIG,
+ MAX_NL80211_WOWLAN_TRIG = NUM_NL80211_WOWLAN_TRIG - 1
+ };
+
+ /**
+ * enum nl80211_iface_limit_attrs - limit attributes
+ * @NL80211_IFACE_LIMIT_UNSPEC: (reserved)
+ * @NL80211_IFACE_LIMIT_MAX: maximum number of interfaces that
+ * can be chosen from this set of interface types (u32)
+ * @NL80211_IFACE_LIMIT_TYPES: nested attribute containing a
+ * flag attribute for each interface type in this set
+ * @NUM_NL80211_IFACE_LIMIT: number of attributes
+ * @MAX_NL80211_IFACE_LIMIT: highest attribute number
+ */
+ enum nl80211_iface_limit_attrs {
+ NL80211_IFACE_LIMIT_UNSPEC,
+ NL80211_IFACE_LIMIT_MAX,
+ NL80211_IFACE_LIMIT_TYPES,
+
+ /* keep last */
+ NUM_NL80211_IFACE_LIMIT,
+ MAX_NL80211_IFACE_LIMIT = NUM_NL80211_IFACE_LIMIT - 1
+ };
+
+ /**
+ * enum nl80211_if_combination_attrs -- interface combination attributes
+ *
+ * @NL80211_IFACE_COMB_UNSPEC: (reserved)
+ * @NL80211_IFACE_COMB_LIMITS: Nested attributes containing the limits
+ * for given interface types, see &enum nl80211_iface_limit_attrs.
+ * @NL80211_IFACE_COMB_MAXNUM: u32 attribute giving the total number of
+ * interfaces that can be created in this group. This number doesn't
+ * apply to interfaces purely managed in software, which are listed
+ * in a separate attribute, %NL80211_ATTR_SOFTWARE_IFTYPES.
+ * @NL80211_IFACE_COMB_STA_AP_BI_MATCH: flag attribute specifying that
+ * beacon intervals within this group must be all the same even for
+ * infrastructure and AP/GO combinations, i.e. the GO(s) must adopt
+ * the infrastructure network's beacon interval.
+ * @NL80211_IFACE_COMB_NUM_CHANNELS: u32 attribute specifying how many
+ * different channels may be used within this group.
+ * @NUM_NL80211_IFACE_COMB: number of attributes
+ * @MAX_NL80211_IFACE_COMB: highest attribute number
+ *
+ * Examples:
+ * limits = [ #{STA} <= 1, #{AP} <= 1 ], matching BI, channels = 1, max = 2
+ * => allows an AP and a STA that must match BIs
+ *
+ * numbers = [ #{AP, P2P-GO} <= 8 ], channels = 1, max = 8
+ * => allows 8 of AP/GO
+ *
+ * numbers = [ #{STA} <= 2 ], channels = 2, max = 2
+ * => allows two STAs on different channels
+ *
+ * numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4
+ * => allows a STA plus three P2P interfaces
+ *
+ * The list of these four possibilities could completely be contained
+ * within the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute to indicate
+ * that any of these groups must match.
+ *
+ * "Combinations" of just a single interface will not be listed here,
+ * a single interface of any valid interface type is assumed to always
+ * be possible by itself. This means that implicitly, for each valid
+ * interface type, the following group always exists:
+ * numbers = [ #{<type>} <= 1 ], channels = 1, max = 1
+ */
+ enum nl80211_if_combination_attrs {
+ NL80211_IFACE_COMB_UNSPEC,
+ NL80211_IFACE_COMB_LIMITS,
+ NL80211_IFACE_COMB_MAXNUM,
+ NL80211_IFACE_COMB_STA_AP_BI_MATCH,
+ NL80211_IFACE_COMB_NUM_CHANNELS,
+
+ /* keep last */
+ NUM_NL80211_IFACE_COMB,
+ MAX_NL80211_IFACE_COMB = NUM_NL80211_IFACE_COMB - 1
+ };
+
+
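The grouping rules above map naturally onto driver-side advertisement. A sketch of a driver declaring the "8 of AP/GO"-style group, assuming the cfg80211 counterpart structures (ieee80211_iface_limit, ieee80211_iface_combination) and wiphy fields that accompany these attributes; an illustration, not part of this patch:

static const struct ieee80211_iface_limit ap_limits[] = {
	{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination if_comb[] = {
	{
		.limits = ap_limits,
		.n_limits = ARRAY_SIZE(ap_limits),
		.max_interfaces = 8,		/* NL80211_IFACE_COMB_MAXNUM */
		.num_different_channels = 1,	/* NL80211_IFACE_COMB_NUM_CHANNELS */
	},
};

/* at wiphy setup time (hypothetical driver code):
 *	wiphy->iface_combinations = if_comb;
 *	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
 */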
+ /**
+ * enum nl80211_plink_state - state of a mesh peer link finite state machine
+ *
+ * @NL80211_PLINK_LISTEN: initial state, considered the implicit
+ * state of non-existent mesh peer links
+ * @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to
+ * this mesh peer
+ * @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received
+ * from this mesh peer
+ * @NL80211_PLINK_CNF_RCVD: mesh plink confirm frame has been
+ * received from this mesh peer
+ * @NL80211_PLINK_ESTAB: mesh peer link is established
+ * @NL80211_PLINK_HOLDING: mesh peer link is being closed or cancelled
+ * @NL80211_PLINK_BLOCKED: all frames transmitted from this mesh
+ * plink are discarded
+ * @NUM_NL80211_PLINK_STATES: number of peer link states
+ * @MAX_NL80211_PLINK_STATES: highest numerical value of plink states
+ */
+ enum nl80211_plink_state {
+ NL80211_PLINK_LISTEN,
+ NL80211_PLINK_OPN_SNT,
+ NL80211_PLINK_OPN_RCVD,
+ NL80211_PLINK_CNF_RCVD,
+ NL80211_PLINK_ESTAB,
+ NL80211_PLINK_HOLDING,
+ NL80211_PLINK_BLOCKED,
+
+ /* keep last */
+ NUM_NL80211_PLINK_STATES,
+ MAX_NL80211_PLINK_STATES = NUM_NL80211_PLINK_STATES - 1
+ };
+
#endif /* __LINUX_NL80211_H */
};
};
+ /**
+ * ieee80211_sched_scan_ies - scheduled scan IEs
+ *
+ * This structure is used to pass the appropriate IEs to be used in scheduled
+ * scans for all bands. It contains both the IEs passed from the userspace
+ * and the ones generated by mac80211.
+ *
+ * @ie: array with the IEs for each supported band
+ * @len: array with the total length of the IEs for each band
+ */
+ struct ieee80211_sched_scan_ies {
+ u8 *ie[IEEE80211_NUM_BANDS];
+ size_t len[IEEE80211_NUM_BANDS];
+ };
+
static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
{
return (struct ieee80211_tx_info *)skb->cb;
* acceleration (i.e. iwlwifi). Those drivers should provide update_tkip_key
* handler.
* The update_tkip_key() call updates the driver with the new phase 1 key.
- * This happens everytime the iv16 wraps around (every 65536 packets). The
+ * This happens every time the iv16 wraps around (every 65536 packets). The
* set_key() call will happen only once for each key (unless the AP did
* rekeying), it will not include a valid phase 1 key. The valid phase 1 key is
* provided by update_tkip_key only. The trigger that makes mac80211 call this
* you should ensure to cancel it on this callback.
* Must be implemented and can sleep.
*
+ * @suspend: Suspend the device; mac80211 itself will quiesce before and
+ * stop transmitting and doing any other configuration, and then
+ * ask the device to suspend. This is only invoked when WoWLAN is
+ * configured, otherwise the device is deconfigured completely and
+ * reconfigured at resume time.
+ *
+ * @resume: If WoWLAN was configured, this indicates that mac80211 is
+ * now resuming its operation, after this the device must be fully
+ * functional again. If this returns an error, the only way out is
+ * to also unregister the device. If it returns 1, then mac80211
+ * will also go through the regular complete restart on resume.
+ *
* @add_interface: Called when a netdevice attached to the hardware is
* enabled. Because it is not called for monitor mode devices, @start
* and @stop must be implemented.
* any error unless this callback returned a negative error code.
* The callback can sleep.
*
+ * @sched_scan_start: Ask the hardware to start scanning repeatedly at
+ * specific intervals. The driver must call the
+ * ieee80211_sched_scan_results() function whenever it finds results.
+ * This process will continue until sched_scan_stop is called.
+ *
+ * @sched_scan_stop: Tell the hardware to stop an ongoing scheduled scan.
+ *
* @sw_scan_start: Notifier function that is called just before a software scan
* is started. Can be NULL, if the driver doesn't need this notification.
* The callback can sleep.
void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
void (*stop)(struct ieee80211_hw *hw);
+ #ifdef CONFIG_PM
+ int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+ int (*resume)(struct ieee80211_hw *hw);
+ #endif
int (*add_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int (*change_interface)(struct ieee80211_hw *hw,
u32 iv32, u16 *phase1key);
int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
+ int (*sched_scan_start)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_sched_scan_ies *ies);
+ void (*sched_scan_stop)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
void (*sw_scan_start)(struct ieee80211_hw *hw);
void (*sw_scan_complete)(struct ieee80211_hw *hw);
int (*get_stats)(struct ieee80211_hw *hw,
*/
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted);
+ /**
+ * ieee80211_sched_scan_results - got results from scheduled scan
+ *
+ * When a scheduled scan is running, this function needs to be called by the
+ * driver whenever there are new scan results available.
+ *
+ * @hw: the hardware that is performing scheduled scans
+ */
+ void ieee80211_sched_scan_results(struct ieee80211_hw *hw);
+
+ /**
+ * ieee80211_sched_scan_stopped - inform that the scheduled scan has stopped
+ *
+ * When a scheduled scan is running, this function can be called by
+ * the driver if it needs to stop the scan to perform another task.
+ * Usual scenarios are drivers that cannot continue the scheduled scan
+ * while associating, for instance.
+ *
+ * @hw: the hardware that is performing scheduled scans
+ */
+ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
+
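For drivers, the two new callbacks pair with the two notification functions declared above. A hedged skeleton (the drv_* names are hypothetical; only the mac80211 entry points shown in this patch are assumed):

static int drv_sched_scan_start(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct cfg80211_sched_scan_request *req,
				struct ieee80211_sched_scan_ies *ies)
{
	/* program the firmware with the requested SSIDs, the per-band
	 * IEs from ies->ie[]/ies->len[] and the scan interval, then
	 * call ieee80211_sched_scan_results(hw) each time the firmware
	 * reports results */
	return 0;
}

static void drv_sched_scan_stop(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	/* stop the firmware scan; if the hardware has to abort on its
	 * own (e.g. to associate), ieee80211_sched_scan_stopped(hw)
	 * tells mac80211 about it */
}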
/**
* ieee80211_iterate_active_interfaces - iterate active interfaces
*
static struct workqueue_struct *_busy_wq;
- struct bt_sock_list l2cap_sk_list = {
- .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
- };
+ LIST_HEAD(chan_list);
+ DEFINE_RWLOCK(chan_list_lock);
static void l2cap_busy_work(struct work_struct *work);
return c;
}
+ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
+ {
+ struct l2cap_chan *c;
+
+ list_for_each_entry(c, &chan_list, global_l) {
+ if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
+ goto found;
+ }
+
+ c = NULL;
+ found:
+ return c;
+ }
+
+ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
+ {
+ int err;
+
+ write_lock_bh(&chan_list_lock);
+
+ if (psm && __l2cap_global_chan_by_addr(psm, src)) {
+ err = -EADDRINUSE;
+ goto done;
+ }
+
+ if (psm) {
+ chan->psm = psm;
+ chan->sport = psm;
+ err = 0;
+ } else {
+ u16 p;
+
+ err = -EINVAL;
+ for (p = 0x1001; p < 0x1100; p += 2)
+ if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
+ chan->psm = cpu_to_le16(p);
+ chan->sport = cpu_to_le16(p);
+ err = 0;
+ break;
+ }
+ }
+
+ done:
+ write_unlock_bh(&chan_list_lock);
+ return err;
+ }
+
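The allocation loop above only ever probes odd values, since valid L2CAP PSMs keep the low bit of the low octet set. A standalone sketch of the dynamic candidate space it walks:

#include <stdio.h>

int main(void)
{
	unsigned int p, count = 0;

	/* same bounds and stride as the loop in l2cap_add_psm() */
	for (p = 0x1001; p < 0x1100; p += 2)
		count++;

	printf("%u candidates, 0x1001..0x10ff\n", count);	/* 128 */
	return 0;
}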
+ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
+ {
+ write_lock_bh(&chan_list_lock);
+
+ chan->scid = scid;
+
+ write_unlock_bh(&chan_list_lock);
+
+ return 0;
+ }
+
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
u16 cid = L2CAP_CID_DYN_START;
return 0;
}
- struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
+ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
{
struct l2cap_chan *chan;
chan->sk = sk;
+ write_lock_bh(&chan_list_lock);
+ list_add(&chan->global_l, &chan_list);
+ write_unlock_bh(&chan_list_lock);
+
return chan;
}
- void l2cap_chan_free(struct l2cap_chan *chan)
+ void l2cap_chan_destroy(struct l2cap_chan *chan)
{
+ write_lock_bh(&chan_list_lock);
+ list_del(&chan->global_l);
+ write_unlock_bh(&chan_list_lock);
+
kfree(chan);
}
/* Find socket with cid and source bdaddr.
* Returns closest match, locked.
*/
- static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
+ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
{
- struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
+ struct l2cap_chan *c, *c1 = NULL;
- read_lock(&l2cap_sk_list.lock);
+ read_lock(&chan_list_lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
if (state && sk->sk_state != state)
continue;
- if (chan->scid == cid) {
+ if (c->scid == cid) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
- break;
+ if (!bacmp(&bt_sk(sk)->src, src)) {
+ read_unlock(&chan_list_lock);
+ return c;
+ }
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- sk1 = sk;
+ c1 = c;
}
}
- read_unlock(&l2cap_sk_list.lock);
+ read_unlock(&chan_list_lock);
- return node ? sk : sk1;
+ return c1;
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
struct sock *parent, *sk;
- struct l2cap_chan *chan;
+ struct l2cap_chan *chan, *pchan;
BT_DBG("");
/* Check if we have socket listening on cid */
- parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+ pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
conn->src);
- if (!parent)
+ if (!pchan)
return;
+ parent = pchan->sk;
+
bh_lock_sock(parent);
/* Check for backlog size */
if (!sk)
goto clean;
- chan = l2cap_chan_alloc(sk);
+ chan = l2cap_chan_create(sk);
if (!chan) {
l2cap_sock_kill(sk);
goto clean;
/* Find socket with psm and source bdaddr.
* Returns closest match.
*/
- static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
+ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
{
- struct sock *sk = NULL, *sk1 = NULL;
- struct hlist_node *node;
+ struct l2cap_chan *c, *c1 = NULL;
- read_lock(&l2cap_sk_list.lock);
+ read_lock(&chan_list_lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
if (state && sk->sk_state != state)
continue;
- if (chan->psm == psm) {
+ if (c->psm == psm) {
/* Exact match. */
- if (!bacmp(&bt_sk(sk)->src, src))
- break;
+ if (!bacmp(&bt_sk(sk)->src, src)) {
+ read_unlock(&chan_list_lock);
+ return c;
+ }
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
- sk1 = sk;
+ c1 = c;
}
}
- read_unlock(&l2cap_sk_list.lock);
+ read_unlock(&chan_list_lock);
- return node ? sk : sk1;
+ return c1;
}
int l2cap_chan_connect(struct l2cap_chan *chan)
{
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
- struct l2cap_chan *chan = NULL;
+ struct l2cap_chan *chan = NULL, *pchan;
struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
- parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
- if (!parent) {
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
+ if (!pchan) {
result = L2CAP_CR_BAD_PSM;
goto sendresp;
}
+ parent = pchan->sk;
+
bh_lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
if (!sk)
goto response;
- chan = l2cap_chan_alloc(sk);
+ chan = l2cap_chan_create(sk);
if (!chan) {
l2cap_sock_kill(sk);
goto response;
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
struct l2cap_chan *chan;
- struct sock *sk;
+ struct sock *sk = NULL;
struct l2cap_pinfo *pi;
u16 control;
u8 tx_seq;
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
struct sock *sk;
+ struct l2cap_chan *chan;
- sk = l2cap_get_sock_by_psm(0, psm, conn->src);
- if (!sk)
+ chan = l2cap_global_chan_by_psm(0, psm, conn->src);
+ if (!chan)
goto drop;
+ sk = chan->sk;
+
bh_lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
struct sock *sk;
+ struct l2cap_chan *chan;
- sk = l2cap_get_sock_by_scid(0, cid, conn->src);
- if (!sk)
+ chan = l2cap_global_chan_by_scid(0, cid, conn->src);
+ if (!chan)
goto drop;
+ sk = chan->sk;
+
bh_lock_sock(sk);
BT_DBG("sk %p, len %d", sk, skb->len);
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
int exact = 0, lm1 = 0, lm2 = 0;
- register struct sock *sk;
- struct hlist_node *node;
+ struct l2cap_chan *c;
if (type != ACL_LINK)
return -EINVAL;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
- read_lock(&l2cap_sk_list.lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ read_lock(&chan_list_lock);
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
- if (chan->role_switch)
+ if (c->role_switch)
lm1 |= HCI_LM_MASTER;
exact++;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
- if (chan->role_switch)
+ if (c->role_switch)
lm2 |= HCI_LM_MASTER;
}
}
- read_unlock(&l2cap_sk_list.lock);
+ read_unlock(&chan_list_lock);
return exact ? lm1 : lm2;
}
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
- struct sock *sk;
- struct hlist_node *node;
+ struct l2cap_chan *c;
- read_lock_bh(&l2cap_sk_list.lock);
+ read_lock_bh(&chan_list_lock);
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- struct l2cap_pinfo *pi = l2cap_pi(sk);
- struct l2cap_chan *chan = pi->chan;
+ list_for_each_entry(c, &chan_list, global_l) {
+ struct sock *sk = c->sk;
seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst),
- sk->sk_state, __le16_to_cpu(chan->psm),
- chan->scid, chan->dcid,
- chan->imtu, chan->omtu, chan->sec_level,
- chan->mode);
+ sk->sk_state, __le16_to_cpu(c->psm),
+ c->scid, c->dcid, c->imtu, c->omtu,
+ c->sec_level, c->mode);
}
- read_unlock_bh(&l2cap_sk_list.lock);
+ read_unlock_bh(&chan_list_lock);
return 0;
}
sk_stop_timer(sk, &sk->sk_timer);
}
- static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
- {
- struct sock *sk;
- struct hlist_node *node;
- sk_for_each(sk, node, &l2cap_sk_list.head) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-
- if (chan->sport == psm && !bacmp(&bt_sk(sk)->src, src))
- goto found;
- }
-
- sk = NULL;
- found:
- return sk;
- }
-
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
struct sock *sk = sock->sk;
}
}
- write_lock_bh(&l2cap_sk_list.lock);
+ if (la.l2_cid)
+ err = l2cap_add_scid(chan, la.l2_cid);
+ else
+ err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
- if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
- err = -EADDRINUSE;
- } else {
- /* Save source address */
- bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
- chan->psm = la.l2_psm;
- chan->sport = la.l2_psm;
- sk->sk_state = BT_BOUND;
-
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
- chan->sec_level = BT_SECURITY_SDP;
- }
+ if (err < 0)
+ goto done;
- if (la.l2_cid)
- chan->scid = la.l2_cid;
+ if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+ __le16_to_cpu(la.l2_psm) == 0x0003)
+ chan->sec_level = BT_SECURITY_SDP;
- write_unlock_bh(&l2cap_sk_list.lock);
+ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+ sk->sk_state = BT_BOUND;
done:
release_sock(sk);
goto done;
}
- if (!chan->psm && !chan->scid) {
- bdaddr_t *src = &bt_sk(sk)->src;
- u16 psm;
-
- err = -EINVAL;
-
- write_lock_bh(&l2cap_sk_list.lock);
-
- for (psm = 0x1001; psm < 0x1100; psm += 2)
- if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
- chan->psm = cpu_to_le16(psm);
- chan->sport = cpu_to_le16(psm);
- err = 0;
- break;
- }
-
- write_unlock_bh(&l2cap_sk_list.lock);
-
- if (err < 0)
- goto done;
- }
-
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
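
The open-coded scan deleted above handed out the first free dynamic
PSM (odd values from 0x1001 upward) at listen time; that duty moves
into l2cap_add_psm(), which this hunk does not show. A plausible
sketch of such a helper, under the same rules the removed code
enforced and reusing the illustrative registry names (the real
function may well differ):

/* Hypothetical sketch only -- the real l2cap_add_psm() is outside
 * this hunk. Lookups here run under the write lock, so they must
 * not take the read lock themselves. */
static struct my_chan *__my_find(u16 id, const u8 *src)
{
	struct my_chan *c;

	list_for_each_entry(c, &my_list, node)
		if (c->id == id && !memcmp(c->addr, src, ETH_ALEN))
			return c;
	return NULL;
}

static int my_add_psm(struct my_chan *c, const u8 *src, u16 psm)
{
	int err = -EINVAL;

	write_lock_bh(&my_lock);
	if (psm) {			/* caller-supplied PSM */
		err = __my_find(psm, src) ? -EADDRINUSE : 0;
	} else {			/* allocate a dynamic one */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__my_find(psm, src)) {
				err = 0;
				break;
			}
	}
	if (!err)
		c->id = psm;
	write_unlock_bh(&my_lock);
	return err;
}
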
if (opt == BT_FLUSHABLE_OFF) {
struct l2cap_conn *conn = chan->conn;
- /* proceed futher only when we have l2cap_conn and
+ /* proceed further only when we have l2cap_conn and
No Flush support in the LM */
if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
err = -EINVAL;
/* Kill poor orphan */
- l2cap_chan_free(l2cap_pi(sk)->chan);
- bt_sock_unlink(&l2cap_sk_list, sk);
+ l2cap_chan_destroy(l2cap_pi(sk)->chan);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
- bt_sock_link(&l2cap_sk_list, sk);
return sk;
}
if (!sk)
return -ENOMEM;
- chan = l2cap_chan_alloc(sk);
+ chan = l2cap_chan_create(sk);
if (!chan) {
l2cap_sock_kill(sk);
return -ENOMEM;
size_t supp_rates_len;
/*
- * During assocation, we save an ERP value from a probe response so
+ * During association, we save an ERP value from a probe response so
* that we can feed ERP info to the driver when handling the
* association completes. These fields probably won't be up-to-date
* otherwise, so you probably don't want to use them.
};
struct ieee80211_if_ap {
- struct beacon_data *beacon;
+ struct beacon_data __rcu *beacon;
struct list_head vlans;
struct list_head list;
/* used for all tx if the VLAN is configured to 4-addr mode */
- struct sta_info *sta;
+ struct sta_info __rcu *sta;
};
struct mesh_stats {
unsigned long ibss_join_req;
/* probe response/beacon for IBSS */
- struct sk_buff *presp, *skb;
+ struct sk_buff __rcu *presp;
+ struct sk_buff *skb;
enum {
IEEE80211_IBSS_MLME_SEARCH,
bool accepting_plinks;
const u8 *ie;
u8 ie_len;
- bool is_secure;
+ enum {
+ IEEE80211_MESH_SEC_NONE = 0x0,
+ IEEE80211_MESH_SEC_AUTHED = 0x1,
+ IEEE80211_MESH_SEC_SECURED = 0x2,
+ } security;
};
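
Replacing the single is_secure boolean with a bitmask lets
authentication and frame protection be tracked independently; callers
test individual bits instead of one flag. An illustrative check
(ifmsh standing for the enclosing mesh interface data):

/* Sketch: the two bits are independent, unlike the old boolean. */
if (ifmsh->security & IEEE80211_MESH_SEC_AUTHED)
	/* ... authenticated-peering behaviour ... */;
if (ifmsh->security & IEEE80211_MESH_SEC_SECURED)
	/* ... protected-frames behaviour ... */;
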
#ifdef CONFIG_MAC80211_MESH
struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
unsigned int fragment_next;
- struct ieee80211_key *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
- struct ieee80211_key *default_unicast_key, *default_multicast_key;
- struct ieee80211_key *default_mgmt_key;
+ struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
+ struct ieee80211_key __rcu *default_unicast_key;
+ struct ieee80211_key __rcu *default_multicast_key;
+ struct ieee80211_key __rcu *default_mgmt_key;
u16 sequence_number;
__be16 control_port_protocol;
/* device is started */
bool started;
+ /* wowlan is enabled -- don't reconfig on resume */
+ bool wowlan;
+
int tx_headroom; /* required headroom for hardware/radiotap */
/* count for keys needing tailroom space allocation */
spinlock_t sta_lock;
unsigned long num_sta;
struct list_head sta_list, sta_pending_list;
- struct sta_info *sta_hash[STA_HASH_SIZE];
+ struct sta_info __rcu *sta_hash[STA_HASH_SIZE];
struct timer_list sta_cleanup;
struct work_struct sta_finish_work;
int sta_generation;
int scan_channel_idx;
int scan_ies_len;
+ bool sched_scanning;
+ struct ieee80211_sched_scan_ies sched_scan_ies;
+ struct work_struct sched_scan_stopped_work;
+
unsigned long leave_oper_channel_time;
enum mac80211_scan_state next_scan_state;
struct delayed_work scan_work;
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
+ /* scheduled scan handling */
+ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_sched_scan_request *req);
+ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_sched_scan_stopped_work(struct work_struct *work);
+
/* off-channel helpers */
bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
void ieee80211_stop_device(struct ieee80211_local *local);
#ifdef CONFIG_PM
- int __ieee80211_suspend(struct ieee80211_hw *hw);
+ int __ieee80211_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
static inline int __ieee80211_resume(struct ieee80211_hw *hw)
{
return ieee80211_reconfig(hw_to_local(hw));
}
#else
- static inline int __ieee80211_suspend(struct ieee80211_hw *hw)
+ static inline int __ieee80211_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
{
return 0;
}
/* APs need special treatment */
if (sdata->vif.type == NL80211_IFTYPE_AP) {
struct ieee80211_sub_if_data *vlan, *tmpsdata;
- struct beacon_data *old_beacon = sdata->u.ap.beacon;
+ struct beacon_data *old_beacon =
+ rtnl_dereference(sdata->u.ap.beacon);
/* sdata_running will return false, so this will disable */
ieee80211_bss_info_change_notify(sdata,
+ IEEE80211_ENCRYPT_HEADROOM;
ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
- ret = dev_alloc_name(ndev, ndev->name);
- if (ret < 0)
- goto fail;
-
ieee80211_assign_perm_addr(local, ndev, type);
memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
+
+ /* The grow-table function takes this lock as writer; node add and
+ * delete take it as readers. When reading the table (i.e. doing
+ * lookups) we are well protected by RCU.
+ */
+ static DEFINE_RWLOCK(pathtbl_resize_lock);
+
+
+ static struct mesh_table *mesh_table_alloc(int size_order)
+ {
+ int i;
+ struct mesh_table *newtbl;
+
+ newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
+ if (!newtbl)
+ return NULL;
+
+ newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
+ (1 << size_order), GFP_KERNEL);
+
+ if (!newtbl->hash_buckets) {
+ kfree(newtbl);
+ return NULL;
+ }
+
+ newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
+ (1 << size_order), GFP_KERNEL);
+ if (!newtbl->hashwlock) {
+ kfree(newtbl->hash_buckets);
+ kfree(newtbl);
+ return NULL;
+ }
+
+ newtbl->size_order = size_order;
+ newtbl->hash_mask = (1 << size_order) - 1;
+ atomic_set(&newtbl->entries, 0);
+ get_random_bytes(&newtbl->hash_rnd,
+ sizeof(newtbl->hash_rnd));
+ for (i = 0; i <= newtbl->hash_mask; i++)
+ spin_lock_init(&newtbl->hashwlock[i]);
+
+ return newtbl;
+ }
+
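
mesh_table_alloc() unwinds its partial allocations in reverse order on
failure, so a caller either sees a fully initialized table (buckets
zeroed, every per-bucket lock initialized, hash seed randomized) or
NULL. Bring-up then reduces to a single call; the size-order constant
here is illustrative, not from the patch:

/* Illustrative init: 1 << MY_INIT_ORDER buckets. */
mesh_paths = mesh_table_alloc(MY_INIT_ORDER);
if (!mesh_paths)
	return -ENOMEM;
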
static void __mesh_table_free(struct mesh_table *tbl)
{
kfree(tbl->hash_buckets);
kfree(tbl);
}
- void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
struct hlist_head *mesh_hash;
struct hlist_node *p, *q;
mesh_hash = tbl->hash_buckets;
for (i = 0; i <= tbl->hash_mask; i++) {
- spin_lock(&tbl->hashwlock[i]);
+ spin_lock_bh(&tbl->hashwlock[i]);
hlist_for_each_safe(p, q, &mesh_hash[i]) {
tbl->free_node(p, free_leafs);
atomic_dec(&tbl->entries);
}
- spin_unlock(&tbl->hashwlock[i]);
+ spin_unlock_bh(&tbl->hashwlock[i]);
}
__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
- struct mesh_table *newtbl)
+ struct mesh_table *newtbl)
{
struct hlist_head *oldhash;
struct hlist_node *p, *q;
< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
return -EAGAIN;
-
newtbl->free_node = oldtbl->free_node;
newtbl->mean_chain_len = oldtbl->mean_chain_len;
newtbl->copy_node = oldtbl->copy_node;
return -ENOMEM;
}
+ static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
+ struct mesh_table *tbl)
+ {
+ /* Use last four bytes of hw addr and interface index as hash index */
+ return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
+ & tbl->hash_mask;
+ }
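
The hash mixes the low four bytes of the destination address and the
interface index through jhash, seeded with the table's random
hash_rnd, then masks down to the bucket count. Callers locate the
bucket and its per-bucket lock in one step, as the add path below
does:

/* Illustrative: pick the bucket and the lock that guards it. */
u32 idx = mesh_table_hash(dst, sdata, tbl);
struct hlist_head *bucket = &tbl->hash_buckets[idx];
spinlock_t *lock = &tbl->hashwlock[idx];
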
- /* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
- */
- static DEFINE_RWLOCK(pathtbl_resize_lock);
/**
*
if (!new_node)
goto err_node_alloc;
- read_lock(&pathtbl_resize_lock);
+ read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN);
new_mpath->sdata = sdata;
new_mpath->flags = 0;
hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
bucket = &mesh_paths->hash_buckets[hash_idx];
- spin_lock(&mesh_paths->hashwlock[hash_idx]);
+ spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
mesh_paths_generation++;
- spin_unlock(&mesh_paths->hashwlock[hash_idx]);
- read_unlock(&pathtbl_resize_lock);
+ spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+ read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
return 0;
err_exists:
- spin_unlock(&mesh_paths->hashwlock[hash_idx]);
- read_unlock(&pathtbl_resize_lock);
+ spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+ read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
kfree(new_mpath);
{
struct mesh_table *oldtbl, *newtbl;
- newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
+ rcu_read_lock();
+ newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
if (!newtbl)
return;
- write_lock(&pathtbl_resize_lock);
+ write_lock_bh(&pathtbl_resize_lock);
oldtbl = mesh_paths;
if (mesh_table_grow(mesh_paths, newtbl) < 0) {
+ rcu_read_unlock();
__mesh_table_free(newtbl);
- write_unlock(&pathtbl_resize_lock);
+ write_unlock_bh(&pathtbl_resize_lock);
return;
}
+ rcu_read_unlock();
rcu_assign_pointer(mesh_paths, newtbl);
- write_unlock(&pathtbl_resize_lock);
+ write_unlock_bh(&pathtbl_resize_lock);
synchronize_rcu();
mesh_table_free(oldtbl, false);
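
Both grow paths follow the classic RCU replace-and-reclaim sequence:
build the bigger table off to the side, publish it with
rcu_assign_pointer(), wait out pre-existing readers with
synchronize_rcu(), and only then free the old table (keeping the leaf
nodes, which the new table now references). Reduced to its skeleton,
with illustrative names:

/* RCU pointer-swap resize, sketched; my_table is an __rcu pointer. */
static void my_resize(void)
{
	struct my_table *old, *new = my_table_alloc(/* bigger */);

	if (!new)
		return;
	write_lock_bh(&my_resize_lock);	/* exclude concurrent writers */
	old = rcu_dereference_protected(my_table,
					lockdep_is_held(&my_resize_lock));
	if (my_table_copy(old, new) < 0) {
		write_unlock_bh(&my_resize_lock);
		my_table_free(new);
		return;
	}
	rcu_assign_pointer(my_table, new);	/* publish new table */
	write_unlock_bh(&my_resize_lock);
	synchronize_rcu();			/* drain old readers */
	my_table_free(old);			/* now safe to reclaim */
}
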
{
struct mesh_table *oldtbl, *newtbl;
- newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
+ rcu_read_lock();
+ newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
if (!newtbl)
return;
- write_lock(&pathtbl_resize_lock);
+ write_lock_bh(&pathtbl_resize_lock);
oldtbl = mpp_paths;
if (mesh_table_grow(mpp_paths, newtbl) < 0) {
+ rcu_read_unlock();
__mesh_table_free(newtbl);
- write_unlock(&pathtbl_resize_lock);
+ write_unlock_bh(&pathtbl_resize_lock);
return;
}
+ rcu_read_unlock();
rcu_assign_pointer(mpp_paths, newtbl);
- write_unlock(&pathtbl_resize_lock);
+ write_unlock_bh(&pathtbl_resize_lock);
synchronize_rcu();
mesh_table_free(oldtbl, false);
if (!new_node)
goto err_node_alloc;
- read_lock(&pathtbl_resize_lock);
+ read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN);
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
new_mpath->sdata = sdata;
hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
bucket = &mpp_paths->hash_buckets[hash_idx];
- spin_lock(&mpp_paths->hashwlock[hash_idx]);
+ spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
grow = 1;
- spin_unlock(&mpp_paths->hashwlock[hash_idx]);
- read_unlock(&pathtbl_resize_lock);
+ spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+ read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
return 0;
err_exists:
- spin_unlock(&mpp_paths->hashwlock[hash_idx]);
- read_unlock(&pathtbl_resize_lock);
+ spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+ read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
kfree(new_mpath);
int hash_idx;
int err = 0;
- read_lock(&pathtbl_resize_lock);
+ read_lock_bh(&pathtbl_resize_lock);
hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
bucket = &mesh_paths->hash_buckets[hash_idx];
- spin_lock(&mesh_paths->hashwlock[hash_idx]);
+ spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
err = -ENXIO;
enddel:
mesh_paths_generation++;
- spin_unlock(&mesh_paths->hashwlock[hash_idx]);
- read_unlock(&pathtbl_resize_lock);
+ spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+ read_unlock_bh(&pathtbl_resize_lock);
return err;
}
*
* @mpath: mesh path whose queue has to be freed
*
- * Locking: the function must me called withing a rcu_read_lock region
+ * Locking: the function must be called within a rcu_read_lock region
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
struct hlist_node *p;
int i;
- read_lock(&pathtbl_resize_lock);
+ read_lock_bh(&pathtbl_resize_lock);
for_each_mesh_entry(mesh_paths, p, node, i) {
if (node->mpath->sdata != sdata)
continue;
} else
spin_unlock_bh(&mpath->state_lock);
}
- read_unlock(&pathtbl_resize_lock);
+ read_unlock_bh(&pathtbl_resize_lock);
}
void mesh_pathtbl_unregister(void)
}
}
- /* try to sample up to half of the availble rates during each interval */
+ /* try to sample up to half of the available rates during each interval */
mi->sample_count *= 4;
cur_prob = 0;
const struct mcs_group *group;
unsigned int tx_time, tx_time_rtscts, tx_time_data;
unsigned int cw = mp->cw_min;
+ unsigned int ctime = 0;
unsigned int t_slot = 9; /* FIXME */
unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
- tx_time = 2 * (t_slot + mi->overhead + tx_time_data);
- tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data);
+
+ /* Contention time for first 2 tries */
+ ctime = (t_slot * cw) >> 1;
+ cw = min((cw << 1) | 1, mp->cw_max);
+ ctime += (t_slot * cw) >> 1;
+ cw = min((cw << 1) | 1, mp->cw_max);
+
+ /* Total TX time for data and Contention after first 2 tries */
+ tx_time = ctime + 2 * (mi->overhead + tx_time_data);
+ tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data);
+
+ /* See how many more tries we can fit inside segment size */
do {
- cw = (cw << 1) | 1;
- cw = min(cw, mp->cw_max);
- tx_time += cw + t_slot + mi->overhead;
- tx_time_rtscts += cw + t_slot + mi->overhead_rtscts;
+ /* Contention time for this try */
+ ctime = (t_slot * cw) >> 1;
+ cw = min((cw << 1) | 1, mp->cw_max);
+
+ /* Total TX time after this try */
+ tx_time += ctime + mi->overhead + tx_time_data;
+ tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data;
+
if (tx_time_rtscts < mp->segment_size)
mr->retry_count_rtscts++;
} while ((tx_time < mp->segment_size) &&
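
Each (t_slot * cw) >> 1 term is the expected backoff: the station
draws a uniform slot count from [0, cw], so it waits cw/2 slots of
t_slot microseconds on average, and cw roughly doubles after every
failed try until mp->cw_max caps it. Worked through with the usual
9 us slot and an assumed cw_min of 15:

/* Expected contention time per try (t_slot = 9, cw_min = 15):
 *   try 1: cw = 15 -> (9 * 15) >> 1 =  67 us
 *   try 2: cw = 31 -> (9 * 31) >> 1 = 139 us
 *   try 3: cw = 63 -> (9 * 63) >> 1 = 283 us
 * Each further retry costs roughly twice the previous backoff, which
 * is why the retry counts are bounded by mp->segment_size above.
 */
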
* specs were sane enough this time around to require padding each A-MSDU
* subframe to a length that is a multiple of four.
*
- * Padding like Atheros hardware adds which is inbetween the 802.11 header and
+ * Padding like Atheros hardware adds which is between the 802.11 header and
* the payload is not supported, the driver is required to move the 802.11
* header to be directly in front of the payload in that case.
*/
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
struct sk_buff *skb = rx->skb;
- if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
+ if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+ !local->sched_scanning))
return RX_CONTINUE;
if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning))
+ test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ local->sched_scanning)
return ieee80211_scan_rx(rx->sdata, skb);
/* scanning finished during invoking of handlers */
* establishment frame, beacon or probe, drop the frame.
*/
- if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) {
+ if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
struct ieee80211_mgmt *mgmt;
if (!ieee80211_is_mgmt(hdr->frame_control))
return RX_DROP_MONITOR;
if (ieee80211_is_action(hdr->frame_control)) {
+ u8 category;
mgmt = (struct ieee80211_mgmt *)hdr;
- if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
+ category = mgmt->u.action.category;
+ if (category != WLAN_CATEGORY_MESH_ACTION &&
+ category != WLAN_CATEGORY_SELF_PROTECTED)
return RX_DROP_MONITOR;
return RX_CONTINUE;
}
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
- rx->local->hw.extra_tx_headroom);
+ rx->local->hw.extra_tx_headroom, true);
while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
goto handled;
}
break;
- case WLAN_CATEGORY_MESH_PLINK:
+ case WLAN_CATEGORY_MESH_ACTION:
if (!ieee80211_vif_is_mesh(&sdata->vif))
break;
goto queue;
* Station entries are added by mac80211 when you establish a link with a
* peer. This means different things for the different type of interfaces
* we support. For a regular station this means we add the AP sta when we
- * receive an assocation response from the AP. For IBSS this occurs when
+ * receive an association response from the AP. For IBSS this occurs when
* we get to know about a peer on the same IBSS. For WDS we add the sta for
- * the peer imediately upon device open. When using AP mode we add stations
+ * the peer immediately upon device open. When using AP mode we add stations
* for each respective station upon request from userspace through nl80211.
*
* In order to remove a STA info structure, various sta_info_destroy_*()
{
struct sta_info *s;
- s = local->sta_hash[STA_HASH(sta->sta.addr)];
+ s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)],
+ lockdep_is_held(&local->sta_lock));
if (!s)
return -ENOENT;
if (s == sta) {
return 0;
}
- while (s->hnext && s->hnext != sta)
- s = s->hnext;
- if (s->hnext) {
+ while (rcu_access_pointer(s->hnext) &&
+ rcu_access_pointer(s->hnext) != sta)
+ s = rcu_dereference_protected(s->hnext,
+ lockdep_is_held(&local->sta_lock));
+ if (rcu_access_pointer(s->hnext)) {
rcu_assign_pointer(s->hnext, sta->hnext);
return 0;
}
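
Every load of the __rcu hnext pointer in the walk above is now
annotated: rcu_dereference_protected() records that holding sta_lock
is what makes the full dereference legal, while rcu_access_pointer()
suffices where only the pointer value is compared. The same unlink,
stripped to illustrative types:

struct my_node {
	struct my_node __rcu *next;
};

/* Remove @victim from a singly linked __rcu chain headed by @head;
 * the caller holds @lock, licensing the protected dereferences. */
static int my_unlink(struct my_node __rcu **head, struct my_node *victim,
		     spinlock_t *lock)
{
	struct my_node *s;

	s = rcu_dereference_protected(*head, lockdep_is_held(lock));
	if (!s)
		return -ENOENT;
	if (s == victim) {
		rcu_assign_pointer(*head, victim->next);
		return 0;
	}
	while (rcu_access_pointer(s->next) &&
	       rcu_access_pointer(s->next) != victim)
		s = rcu_dereference_protected(s->next,
					      lockdep_is_held(lock));
	if (rcu_access_pointer(s->next)) {
		rcu_assign_pointer(s->next, victim->next);
		return 0;
	}
	return -ENOENT;
}
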
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
#ifdef CONFIG_MAC80211_MESH
- sta->plink_state = PLINK_LISTEN;
+ sta->plink_state = NL80211_PLINK_LISTEN;
init_timer(&sta->plink_timer);
#endif
if (ret)
return ret;
+ mutex_lock(&local->key_mtx);
for (i = 0; i < NUM_DEFAULT_KEYS; i++)
- ieee80211_key_free(local, sta->gtk[i]);
+ __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
if (sta->ptk)
- ieee80211_key_free(local, sta->ptk);
+ __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
+ mutex_unlock(&local->key_mtx);
sta->dead = true;
for (i = 0; i < regd->n_reg_rules; i++) {
const struct ieee80211_reg_rule *rr;
const struct ieee80211_freq_range *fr = NULL;
- const struct ieee80211_power_rule *pr = NULL;
rr = &regd->reg_rules[i];
fr = &rr->freq_range;
- pr = &rr->power_rule;
/*
* We only need to know if one frequency rule was
if (r) {
/*
* We will disable all channels that do not match our
- * recieved regulatory rule unless the hint is coming
+ * received regulatory rule unless the hint is coming
* from a Country IE and the Country IE had no information
* about a band. The IEEE 802.11 spec allows for an AP
* to send only a subset of the regulatory rules allowed,
request_wiphy && request_wiphy == wiphy &&
request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
/*
- * This gaurantees the driver's requested regulatory domain
+ * This guarantees the driver's requested regulatory domain
* will always be used as a base for further regulatory
* settings
*/