Git Repo - linux.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author David S. Miller <[email protected]>
Tue, 19 Apr 2011 07:21:33 +0000 (00:21 -0700)
committer David S. Miller <[email protected]>
Tue, 19 Apr 2011 07:21:33 +0000 (00:21 -0700)
Conflicts:
drivers/net/bnx2x/bnx2x_ethtool.c

15 files changed:
MAINTAINERS
drivers/net/bna/bfa_ioc.c
drivers/net/bna/bnad.c
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_main.c
drivers/net/sfc/efx.c
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/recv.c
net/core/dev.c
net/ipv4/inet_connection_sock.c
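
The single conflict in this merge is in drivers/net/bnx2x/bnx2x_ethtool.c, where an LED-mode fix and the conversion of the old .phys_id ethtool hook to the new .set_phys_id callback touched the same lines; the resolution is visible in the '++' lines of the combined hunk below. As a schematic, hedged sketch of the callback shape being adopted (my_priv and the my_led_*() helpers are illustrative placeholders, not driver symbols):

/* Schematic sketch only: the shape of the 2.6.39-era .set_phys_id callback.
 * my_priv and the my_led_*() helpers are placeholders for illustration. */
#include <linux/netdevice.h>
#include <linux/ethtool.h>

struct my_priv { int dummy; };
static void my_led_on(struct my_priv *p)      { }  /* drive the port LED on   */
static void my_led_off(struct my_priv *p)     { }  /* drive the port LED off  */
static void my_led_restore(struct my_priv *p) { }  /* restore normal link LED */

static int my_set_phys_id(struct net_device *dev,
                          enum ethtool_phys_id_state state)
{
        struct my_priv *priv = netdev_priv(dev);

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;       /* ask the core to cycle on/off once per second */
        case ETHTOOL_ID_ON:
                my_led_on(priv);
                break;
        case ETHTOOL_ID_OFF:
                my_led_off(priv);
                break;
        case ETHTOOL_ID_INACTIVE:
                my_led_restore(priv);
                break;
        }
        return 0;
}

Returning a positive value from ETHTOOL_ID_ACTIVE asks the ethtool core to drive the ON/OFF blinking itself at that many cycles per second, which is why the resolved bnx2x_set_phys_id below simply returns 1.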

diff --combined MAINTAINERS
index e1b056333b573299eee017aef03199ffd9e688db,c85368d440946464c60cb6c39dd1e3e2ae9ba1dd..b5266ad5016798a99bf5445d5f0508bb684d3942
@@@ -151,6 -151,7 +151,7 @@@ S: Maintaine
  F:    drivers/net/hamradio/6pack.c
  
  8169 10/100/1000 GIGABIT ETHERNET DRIVER
+ M:    Realtek linux nic maintainers <[email protected]>
  M:    Francois Romieu <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -1224,6 -1225,13 +1225,6 @@@ W:     http://wireless.kernel.org/en/users/
  S:    Supported
  F:    drivers/net/wireless/ath/ath9k/
  
 -ATHEROS AR9170 WIRELESS DRIVER
 -M:    Christian Lamparter <[email protected]>
 -L:    [email protected]
 -W:    http://wireless.kernel.org/en/users/Drivers/ar9170
 -S:    Obsolete
 -F:    drivers/net/wireless/ath/ar9170/
 -
  CARL9170 LINUX COMMUNITY WIRELESS DRIVER
  M:    Christian Lamparter <[email protected]>
  L:    [email protected]
@@@ -3348,12 -3356,6 +3349,12 @@@ F:    Documentation/wimax/README.i2400
  F:    drivers/net/wimax/i2400m/
  F:    include/linux/wimax/i2400m.h
  
 +INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
 +M:    Stanislaw Gruszka <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/net/wireless/iwlegacy/
 +
  INTEL WIRELESS WIFI LINK (iwlwifi)
  M:    Wey-Yi Guy <[email protected]>
  M:    Intel Linux Wireless <[email protected]>
diff --combined drivers/net/bna/bfa_ioc.c
index c1c9e70eec2ff03835c47bfeb55ad444f8541560,7581518ecfa2eade4c4bb4cba60601e5f39b7be2..ba2a4e13cf4fea352898ede24d55a8deea99c295
@@@ -38,6 -38,8 +38,8 @@@
  #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
  #define bfa_ioc_notify_fail(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+ #define bfa_ioc_sync_start(__ioc)               \
+                       ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
  #define bfa_ioc_sync_join(__ioc)                      \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
  #define bfa_ioc_sync_leave(__ioc)                     \
@@@ -602,7 -604,7 +604,7 @@@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *
        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
-                       if (bfa_ioc_sync_complete(ioc)) {
+                       if (bfa_ioc_sync_start(ioc)) {
                                iocpf->retry_count = 0;
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@@ -1272,12 -1274,13 +1274,12 @@@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc
  void
  bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  {
 -      u32     pgnum, pgoff;
 +      u32     pgnum;
        u32     loff = 0;
        int             i;
        u32     *fwsig = (u32 *) fwhdr;
  
        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 -      pgoff = bfa_ioc_smem_pgoff(ioc, loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  
        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
@@@ -1313,7 -1316,7 +1315,7 @@@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *io
   * execution context (driver/bios) must match.
   */
  static bool
- bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
  {
        struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
  
        if (fwhdr.signature != drv_fwhdr->signature)
                return false;
  
-       if (fwhdr.exec != drv_fwhdr->exec)
+       if (swab32(fwhdr.param) != boot_env)
                return false;
  
        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@@ -1351,9 -1354,12 +1353,12 @@@ bfa_ioc_hwinit(struct bfa_ioc *ioc, boo
  {
        enum bfi_ioc_state ioc_fwstate;
        bool fwvalid;
+       u32 boot_env;
  
        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  
+       boot_env = BFI_BOOT_LOADER_OS;
        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;
  
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-               false : bfa_ioc_fwver_valid(ioc);
+               false : bfa_ioc_fwver_valid(ioc, boot_env);
  
        if (!fwvalid) {
-               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+               bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
                return;
        }
  
        /**
         * Initialize the h/w for any other states.
         */
-       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+       bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
  }
  
  void
@@@ -1505,10 -1511,10 +1510,10 @@@ bfa_ioc_hb_stop(struct bfa_ioc *ioc
   */
  static void
  bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-                   u32 boot_param)
+                   u32 boot_env)
  {
        u32 *fwimg;
 -      u32 pgnum, pgoff;
 +      u32 pgnum;
        u32 loff = 0;
        u32 chunkno = 0;
        u32 i;
        fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
  
        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 -      pgoff = bfa_ioc_smem_pgoff(ioc, loff);
  
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  
        /*
         * Set boot type and boot param at the end.
        */
-       writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+       writel(boot_type, ((ioc->ioc_regs.smem_page_start)
                        + (BFI_BOOT_TYPE_OFF)));
-       writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-                       + (BFI_BOOT_PARAM_OFF)));
+       writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+                       + (BFI_BOOT_LOADER_OFF)));
  }
  
  static void
@@@ -1719,7 -1726,7 +1724,7 @@@ bfa_ioc_pll_init(struct bfa_ioc *ioc
   * as the entry vector.
   */
  static void
- bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
  {
        void __iomem *rb;
  
         * Initialize IOC state of all functions on a chip reset.
         */
        rb = ioc->pcidev.pci_bar_kva;
-       if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+       if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
                writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
        } else {
        }
  
        bfa_ioc_msgflush(ioc);
-       bfa_ioc_download_fw(ioc, boot_type, boot_param);
+       bfa_ioc_download_fw(ioc, boot_type, boot_env);
  
        /**
         * Enable interrupts just before starting LPU
diff --combined drivers/net/bna/bnad.c
index b9f253470da280e4d6f5bff3511ea1817d91860b,8e6ceab9f4d800e4cbfee2cac22a297d0d987396..e588511f47fb0560a4aa1f5e31b3b18c9a238ba8
@@@ -501,7 -501,7 +501,7 @@@ bnad_poll_cq(struct bnad *bnad, struct 
  
                skb_put(skb, ntohs(cmpl->length));
                if (likely
 -                  (bnad->rx_csum &&
 +                  ((bnad->netdev->features & NETIF_F_RXCSUM) &&
                     (((flags & BNA_CQ_EF_IPV4) &&
                      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
                      (flags & BNA_CQ_EF_IPV6)) &&
@@@ -1837,7 -1837,6 +1837,6 @@@ bnad_setup_rx(struct bnad *bnad, uint r
        /* Initialize the Rx event handlers */
        rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
        rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
-       rx_cbfn.rcb_destroy_cbfn = NULL;
        rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
        rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
        rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
@@@ -2903,20 -2902,23 +2902,20 @@@ bnad_netdev_init(struct bnad *bnad, boo
  {
        struct net_device *netdev = bnad->netdev;
  
 -      netdev->features |= NETIF_F_IPV6_CSUM;
 -      netdev->features |= NETIF_F_TSO;
 -      netdev->features |= NETIF_F_TSO6;
 +      netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
 +              NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +              NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
  
 -      netdev->features |= NETIF_F_GRO;
 -      pr_warn("bna: GRO enabled, using kernel stack GRO\n");
 +      netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
 +              NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 +              NETIF_F_TSO | NETIF_F_TSO6;
  
 -      netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 +      netdev->features |= netdev->hw_features |
 +              NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
  
        if (using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
  
 -      netdev->features |=
 -              NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
 -              NETIF_F_HW_VLAN_FILTER;
 -
 -      netdev->vlan_features = netdev->features;
        netdev->mem_start = bnad->mmio_start;
        netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
  
@@@ -2967,6 -2969,7 +2966,6 @@@ bnad_init(struct bnad *bnad
  
        bnad->txq_depth = BNAD_TXQ_DEPTH;
        bnad->rxq_depth = BNAD_RXQ_DEPTH;
 -      bnad->rx_csum = true;
  
        bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
        bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
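
The bnad.c hunks above replace the driver-private rx_csum flag and the ad hoc feature setup with the hw_features model introduced in this kernel cycle: offloads the user may toggle are advertised in netdev->hw_features, and the RX path tests netdev->features & NETIF_F_RXCSUM directly. A minimal sketch of that pattern, with placeholder names rather than bnad's own:

/* Minimal sketch of the hw_features pattern adopted above;
 * my_setup_netdev() and my_rx_csum_enabled() are placeholders. */
#include <linux/netdevice.h>

static void my_setup_netdev(struct net_device *netdev)
{
        /* Offloads the user may toggle via "ethtool -K" ... */
        netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                              NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                              NETIF_F_TSO | NETIF_F_TSO6;
        /* ... enabled by default, plus fixed (non-togglable) features. */
        netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_RX;
}

/* The RX path then consults the generic flag instead of a private bool. */
static bool my_rx_csum_enabled(struct net_device *netdev)
{
        return netdev->features & NETIF_F_RXCSUM;
}
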
diff --combined drivers/net/bnx2x/bnx2x_ethtool.c
index e711a2292446153b9d7bde23258892e57b47ab80,89cb977898cb5cfbce98bd53422c9d27128761e5..4f42c314986cb2709c17b72998f4c2afc99084f0
@@@ -167,7 -167,6 +167,7 @@@ static int bnx2x_get_settings(struct ne
  {
        struct bnx2x *bp = netdev_priv(dev);
        int cfg_idx = bnx2x_get_link_cfg_idx(bp);
 +
        /* Dual Media boards present all available port types */
        cmd->supported = bp->port.supported[cfg_idx] |
                (bp->port.supported[cfg_idx ^ 1] &
        if ((bp->state == BNX2X_STATE_OPEN) &&
            !(bp->flags & MF_FUNC_DIS) &&
            (bp->link_vars.link_up)) {
 -              cmd->speed = bp->link_vars.line_speed;
 +              ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
                cmd->duplex = bp->link_vars.duplex;
        } else {
 -
 -              cmd->speed = bp->link_params.req_line_speed[cfg_idx];
 +              ethtool_cmd_speed_set(
 +                      cmd, bp->link_params.req_line_speed[cfg_idx]);
                cmd->duplex = bp->link_params.req_duplex[cfg_idx];
        }
  
        if (IS_MF(bp))
 -              cmd->speed = bnx2x_get_mf_speed(bp);
 +              ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
  
        if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
                cmd->port = PORT_TP;
        cmd->maxrxpkt = 0;
  
        DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
 -         DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
 +         DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %u\n"
           DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
           DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
 -         cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
 +         cmd->cmd, cmd->supported, cmd->advertising,
 +         ethtool_cmd_speed(cmd),
           cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
           cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
  
@@@ -228,15 -226,16 +228,15 @@@ static int bnx2x_set_settings(struct ne
                return 0;
  
        DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
 -         "  supported 0x%x  advertising 0x%x  speed %d speed_hi %d\n"
 +         "  supported 0x%x  advertising 0x%x  speed %u\n"
           "  duplex %d  port %d  phy_address %d  transceiver %d\n"
           "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
 -         cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
 -         cmd->speed_hi,
 +         cmd->cmd, cmd->supported, cmd->advertising,
 +         ethtool_cmd_speed(cmd),
           cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
           cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
  
 -      speed = cmd->speed;
 -      speed |= (cmd->speed_hi << 16);
 +      speed = ethtool_cmd_speed(cmd);
  
        if (IS_MF_SI(bp)) {
                u32 part;
                        break;
  
                default:
 -                      DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed);
 +                      DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
                        return -EINVAL;
                }
  
@@@ -1300,6 -1299,91 +1300,6 @@@ static int bnx2x_set_pauseparam(struct 
        return 0;
  }
  
 -static int bnx2x_set_flags(struct net_device *dev, u32 data)
 -{
 -      struct bnx2x *bp = netdev_priv(dev);
 -      int changed = 0;
 -      int rc = 0;
 -
 -      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 -              printk(KERN_ERR "Handling parity error recovery. Try again later\n");
 -              return -EAGAIN;
 -      }
 -
 -      if (!(data & ETH_FLAG_RXVLAN))
 -              return -EINVAL;
 -
 -      if ((data & ETH_FLAG_LRO) && bp->rx_csum && bp->disable_tpa)
 -              return -EINVAL;
 -
 -      rc = ethtool_op_set_flags(dev, data, ETH_FLAG_LRO | ETH_FLAG_RXVLAN |
 -                                      ETH_FLAG_TXVLAN | ETH_FLAG_RXHASH);
 -      if (rc)
 -              return rc;
 -
 -      /* TPA requires Rx CSUM offloading */
 -      if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
 -              if (!(bp->flags & TPA_ENABLE_FLAG)) {
 -                      bp->flags |= TPA_ENABLE_FLAG;
 -                      changed = 1;
 -              }
 -      } else if (bp->flags & TPA_ENABLE_FLAG) {
 -              dev->features &= ~NETIF_F_LRO;
 -              bp->flags &= ~TPA_ENABLE_FLAG;
 -              changed = 1;
 -      }
 -
 -      if (changed && netif_running(dev)) {
 -              bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 -              rc = bnx2x_nic_load(bp, LOAD_NORMAL);
 -      }
 -
 -      return rc;
 -}
 -
 -static u32 bnx2x_get_rx_csum(struct net_device *dev)
 -{
 -      struct bnx2x *bp = netdev_priv(dev);
 -
 -      return bp->rx_csum;
 -}
 -
 -static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
 -{
 -      struct bnx2x *bp = netdev_priv(dev);
 -      int rc = 0;
 -
 -      if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 -              printk(KERN_ERR "Handling parity error recovery. Try again later\n");
 -              return -EAGAIN;
 -      }
 -
 -      bp->rx_csum = data;
 -
 -      /* Disable TPA, when Rx CSUM is disabled. Otherwise all
 -         TPA'ed packets will be discarded due to wrong TCP CSUM */
 -      if (!data) {
 -              u32 flags = ethtool_op_get_flags(dev);
 -
 -              rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
 -      }
 -
 -      return rc;
 -}
 -
 -static int bnx2x_set_tso(struct net_device *dev, u32 data)
 -{
 -      if (data) {
 -              dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
 -              dev->features |= NETIF_F_TSO6;
 -      } else {
 -              dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
 -              dev->features &= ~NETIF_F_TSO6;
 -      }
 -
 -      return 0;
 -}
 -
  static const struct {
        char string[ETH_GSTRING_LEN];
  } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
@@@ -2013,38 -2097,35 +2013,37 @@@ static void bnx2x_get_ethtool_stats(str
        }
  }
  
 -static int bnx2x_phys_id(struct net_device *dev, u32 data)
 +static int bnx2x_set_phys_id(struct net_device *dev,
 +                           enum ethtool_phys_id_state state)
  {
        struct bnx2x *bp = netdev_priv(dev);
 -      int i;
  
        if (!netif_running(dev))
 -              return 0;
 +              return -EAGAIN;
  
        if (!bp->port.pmf)
 -              return 0;
 +              return -EOPNOTSUPP;
  
 -      if (data == 0)
 -              data = 2;
 +      switch (state) {
 +      case ETHTOOL_ID_ACTIVE:
 +              return 1;       /* cycle on/off once per second */
  
 -      for (i = 0; i < (data * 2); i++) {
 -              if ((i % 2) == 0)
 -                      bnx2x_set_led(&bp->link_params, &bp->link_vars,
 -                                    LED_MODE_ON, SPEED_1000);
 -              else
 -                      bnx2x_set_led(&bp->link_params, &bp->link_vars,
 -                                    LED_MODE_FRONT_PANEL_OFF, 0);
 +      case ETHTOOL_ID_ON:
 +              bnx2x_set_led(&bp->link_params, &bp->link_vars,
-                             LED_MODE_OPER, SPEED_1000);
++                            LED_MODE_ON, SPEED_1000);
 +              break;
  
 -              msleep_interruptible(500);
 -              if (signal_pending(current))
 -                      break;
 -      }
 +      case ETHTOOL_ID_OFF:
 +              bnx2x_set_led(&bp->link_params, &bp->link_vars,
-                             LED_MODE_OFF, 0);
++                            LED_MODE_FRONT_PANEL_OFF, 0);
 +
 +              break;
  
 -      bnx2x_set_led(&bp->link_params, &bp->link_vars,
 -                    LED_MODE_OPER, bp->link_vars.line_speed);
 +      case ETHTOOL_ID_INACTIVE:
-               if (bp->link_vars.link_up)
-                       bnx2x_set_led(&bp->link_params, &bp->link_vars,
-                                     LED_MODE_OPER,
-                                     bp->link_vars.line_speed);
++              bnx2x_set_led(&bp->link_params, &bp->link_vars,
++                            LED_MODE_OPER,
++                            bp->link_vars.line_speed);
 +      }
  
        return 0;
  }
@@@ -2123,10 -2204,20 +2122,10 @@@ static const struct ethtool_ops bnx2x_e
        .set_ringparam          = bnx2x_set_ringparam,
        .get_pauseparam         = bnx2x_get_pauseparam,
        .set_pauseparam         = bnx2x_set_pauseparam,
 -      .get_rx_csum            = bnx2x_get_rx_csum,
 -      .set_rx_csum            = bnx2x_set_rx_csum,
 -      .get_tx_csum            = ethtool_op_get_tx_csum,
 -      .set_tx_csum            = ethtool_op_set_tx_hw_csum,
 -      .set_flags              = bnx2x_set_flags,
 -      .get_flags              = ethtool_op_get_flags,
 -      .get_sg                 = ethtool_op_get_sg,
 -      .set_sg                 = ethtool_op_set_sg,
 -      .get_tso                = ethtool_op_get_tso,
 -      .set_tso                = bnx2x_set_tso,
        .self_test              = bnx2x_self_test,
        .get_sset_count         = bnx2x_get_sset_count,
        .get_strings            = bnx2x_get_strings,
 -      .phys_id                = bnx2x_phys_id,
 +      .set_phys_id            = bnx2x_set_phys_id,
        .get_ethtool_stats      = bnx2x_get_ethtool_stats,
        .get_rxnfc              = bnx2x_get_rxnfc,
        .get_rxfh_indir         = bnx2x_get_rxfh_indir,
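
The get/set_settings changes above stop touching cmd->speed and cmd->speed_hi directly and use ethtool_cmd_speed()/ethtool_cmd_speed_set(), which pack the 32-bit link speed across the two 16-bit fields of struct ethtool_cmd. Roughly, the helpers (defined in include/linux/ethtool.h) amount to:

/* Rough paraphrase of the ethtool_cmd speed helpers used above; the real
 * definitions live in include/linux/ethtool.h. */
static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
        ep->speed = (__u16)speed;               /* low 16 bits  */
        ep->speed_hi = (__u16)(speed >> 16);    /* high 16 bits */
}

static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
{
        return (ep->speed_hi << 16) | ep->speed;
}
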
diff --combined drivers/net/qlcnic/qlcnic.h
index b6e0fc33585f77357b5630c2a25ec822b6d1e6be,b0dead00b2d1d6b7809bd2b01906b5bff5a786b5..e5d30538f3730d2100abd777ad8bb279a6b657ce
  
  #include <linux/io.h>
  #include <asm/byteorder.h>
 +#include <linux/bitops.h>
 +#include <linux/if_vlan.h>
  
  #include "qlcnic_hdr.h"
  
  #define _QLCNIC_LINUX_MAJOR 5
  #define _QLCNIC_LINUX_MINOR 0
 -#define _QLCNIC_LINUX_SUBVERSION 15
 -#define QLCNIC_LINUX_VERSIONID  "5.0.15"
 +#define _QLCNIC_LINUX_SUBVERSION 16
 +#define QLCNIC_LINUX_VERSIONID  "5.0.16"
  #define QLCNIC_DRV_IDC_VER  0x01
  #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
  #define TX_IP_PKT     0x04
  #define TX_TCP_LSO    0x05
  #define TX_TCP_LSO6   0x06
 -#define TX_IPSEC      0x07
 -#define TX_IPSEC_CMD  0x0a
  #define TX_TCPV6_PKT  0x0b
  #define TX_UDPV6_PKT  0x0c
  
  /* Tx defines */
+ #define QLCNIC_MAX_FRAGS_PER_TX       14
  #define MAX_TSO_HEADER_DESC   2
  #define MGMT_CMD_DESC_RESV    4
  #define TX_STOP_THRESH                ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@@ -200,7 -201,7 +201,7 @@@ struct rcv_desc 
        __le16 reserved;
        __le32 buffer_length;   /* allocated buffer length (usually 2K) */
        __le64 addr_buffer;
 -};
 +} __packed;
  
  /* opcode field in status_desc */
  #define QLCNIC_SYN_OFFLOAD    0x03
@@@ -292,7 -293,6 +293,7 @@@ struct uni_data_desc
  /* Flash Defines and Structures */
  #define QLCNIC_FLT_LOCATION   0x3F1000
  #define QLCNIC_FW_IMAGE_REGION        0x74
 +#define QLCNIC_BOOTLD_REGION    0X72
  struct qlcnic_flt_header {
        u16 version;
        u16 len;
@@@ -307,7 -307,7 +308,7 @@@ struct qlcnic_flt_entry 
        u8 reserved1;
        u32 size;
        u32 start_addr;
 -      u32 end_add;
 +      u32 end_addr;
  };
  
  /* Magic number to let user know flash is programmed */
@@@ -366,6 -366,12 +367,6 @@@ struct qlcnic_skb_frag 
        u64 length;
  };
  
 -struct qlcnic_recv_crb {
 -      u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
 -      u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
 -      u32 sw_int_mask[NUM_STS_DESC_RINGS];
 -};
 -
  /*    Following defines are for the state of the buffers    */
  #define       QLCNIC_BUFFER_FREE      0
  #define       QLCNIC_BUFFER_BUSY      1
@@@ -382,35 -388,16 +383,35 @@@ struct qlcnic_cmd_buffer 
  
  /* In rx_buffer, we do not need multiple fragments as is a single buffer */
  struct qlcnic_rx_buffer {
 -      struct list_head list;
 +      u16 ref_handle;
        struct sk_buff *skb;
 +      struct list_head list;
        u64 dma;
 -      u16 ref_handle;
  };
  
  /* Board types */
  #define       QLCNIC_GBE      0x01
  #define       QLCNIC_XGBE     0x02
  
 +/*
 + * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
 + * adjusted based on configured MTU.
 + */
 +#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US       3
 +#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS       256
 +
 +#define QLCNIC_INTR_DEFAULT                   0x04
 +#define QLCNIC_CONFIG_INTR_COALESCE           3
 +
 +struct qlcnic_nic_intr_coalesce {
 +      u8      type;
 +      u8      sts_ring_mask;
 +      u16     rx_packets;
 +      u16     rx_time_us;
 +      u16     flag;
 +      u32     timer_out;
 +};
 +
  /*
   * One hardware_context{} per adapter
   * contains interrupt info as well shared hardware info.
@@@ -429,8 -416,6 +430,8 @@@ struct qlcnic_hardware_context 
        u8 linkup;
        u16 port_type;
        u16 board_type;
 +
 +      struct qlcnic_nic_intr_coalesce coal;
  };
  
  struct qlcnic_adapter_stats {
   * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
   */
  struct qlcnic_host_rds_ring {
 -      u32 producer;
 +      void __iomem *crb_rcv_producer;
 +      struct rcv_desc *desc_head;
 +      struct qlcnic_rx_buffer *rx_buf_arr;
        u32 num_desc;
 +      u32 producer;
        u32 dma_size;
        u32 skb_size;
        u32 flags;
 -      void __iomem *crb_rcv_producer;
 -      struct rcv_desc *desc_head;
 -      struct qlcnic_rx_buffer *rx_buf_arr;
        struct list_head free_list;
        spinlock_t lock;
        dma_addr_t phys_addr;
 -};
 +} ____cacheline_internodealigned_in_smp;
  
  struct qlcnic_host_sds_ring {
        u32 consumer;
        u32 num_desc;
        void __iomem *crb_sts_consumer;
 -      void __iomem *crb_intr_mask;
  
        struct status_desc *desc_head;
        struct qlcnic_adapter *adapter;
        struct napi_struct napi;
        struct list_head free_list[NUM_RCV_DESC_RINGS];
  
 +      void __iomem *crb_intr_mask;
        int irq;
  
        dma_addr_t phys_addr;
        char name[IFNAMSIZ+4];
 -};
 +} ____cacheline_internodealigned_in_smp;
  
  struct qlcnic_host_tx_ring {
        u32 producer;
 -      __le32 *hw_consumer;
        u32 sw_consumer;
 -      void __iomem *crb_cmd_producer;
        u32 num_desc;
 -
 -      struct netdev_queue *txq;
 -
 -      struct qlcnic_cmd_buffer *cmd_buf_arr;
 +      void __iomem *crb_cmd_producer;
        struct cmd_desc_type0 *desc_head;
 +      struct qlcnic_cmd_buffer *cmd_buf_arr;
 +      __le32 *hw_consumer;
 +
        dma_addr_t phys_addr;
        dma_addr_t hw_cons_phys_addr;
 -};
 +      struct netdev_queue *txq;
 +} ____cacheline_internodealigned_in_smp;
  
  /*
   * Receive context. There is one such structure per instance of the
   * present elsewhere.
   */
  struct qlcnic_recv_context {
 +      struct qlcnic_host_rds_ring *rds_rings;
 +      struct qlcnic_host_sds_ring *sds_rings;
        u32 state;
        u16 context_id;
        u16 virt_port;
  
 -      struct qlcnic_host_rds_ring *rds_rings;
 -      struct qlcnic_host_sds_ring *sds_rings;
  };
  
  /* HW context creation */
  #define QLCNIC_CDRP_CMD_DESTROY_RX_CTX          0x00000008
  #define QLCNIC_CDRP_CMD_CREATE_TX_CTX           0x00000009
  #define QLCNIC_CDRP_CMD_DESTROY_TX_CTX          0x0000000a
 -#define QLCNIC_CDRP_CMD_SETUP_STATISTICS        0x0000000e
 -#define QLCNIC_CDRP_CMD_GET_STATISTICS          0x0000000f
 -#define QLCNIC_CDRP_CMD_DELETE_STATISTICS       0x00000010
  #define QLCNIC_CDRP_CMD_SET_MTU                 0x00000012
  #define QLCNIC_CDRP_CMD_READ_PHY              0x00000013
  #define QLCNIC_CDRP_CMD_WRITE_PHY             0x00000014
  #define QLCNIC_CDRP_CMD_SET_FLOW_CTL          0x00000017
  #define QLCNIC_CDRP_CMD_READ_MAX_MTU          0x00000018
  #define QLCNIC_CDRP_CMD_READ_MAX_LRO          0x00000019
 -#define QLCNIC_CDRP_CMD_CONFIGURE_TOE         0x0000001a
 -#define QLCNIC_CDRP_CMD_FUNC_ATTRIB           0x0000001b
 -#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS  0x0000001c
 -#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES  0x0000001d
 -#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD        0x0000001e
  #define QLCNIC_CDRP_CMD_MAC_ADDRESS           0x0000001f
  
  #define QLCNIC_CDRP_CMD_GET_PCI_INFO          0x00000020
  #define QLCNIC_CDRP_CMD_GET_NIC_INFO          0x00000021
  #define QLCNIC_CDRP_CMD_SET_NIC_INFO          0x00000022
 -#define QLCNIC_CDRP_CMD_RESET_NPAR            0x00000023
  #define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY        0x00000024
  #define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH                0x00000025
  #define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS    0x00000026
@@@ -603,14 -598,14 +604,14 @@@ struct qlcnic_hostrq_sds_ring 
        __le32 ring_size;               /* Ring entries */
        __le16 msi_index;
        __le16 rsvd;            /* Padding */
 -};
 +} __packed;
  
  struct qlcnic_hostrq_rds_ring {
        __le64 host_phys_addr;  /* Ring base addr */
        __le64 buff_size;               /* Packet buffer size */
        __le32 ring_size;               /* Ring entries */
        __le32 ring_kind;               /* Class of ring */
 -};
 +} __packed;
  
  struct qlcnic_hostrq_rx_ctx {
        __le64 host_rsp_dma_addr;       /* Response dma'd here */
           - N hostrq_rds_rings
           - N hostrq_sds_rings */
        char data[0];
 -};
 +} __packed;
  
  struct qlcnic_cardrsp_rds_ring{
        __le32 host_producer_crb;       /* Crb to use */
        __le32 rsvd1;           /* Padding */
 -};
 +} __packed;
  
  struct qlcnic_cardrsp_sds_ring {
        __le32 host_consumer_crb;       /* Crb to use */
        __le32 interrupt_crb;   /* Crb to use */
 -};
 +} __packed;
  
  struct qlcnic_cardrsp_rx_ctx {
        /* These ring offsets are relative to data[0] below */
           - N cardrsp_rds_rings
           - N cardrs_sds_rings */
        char data[0];
 -};
 +} __packed;
  
  #define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings)     \
        (sizeof(HOSTRQ_RX) +                                    \
@@@ -680,7 -675,7 +681,7 @@@ struct qlcnic_hostrq_cds_ring 
        __le64 host_phys_addr;  /* Ring base addr */
        __le32 ring_size;               /* Ring entries */
        __le32 rsvd;            /* Padding */
 -};
 +} __packed;
  
  struct qlcnic_hostrq_tx_ctx {
        __le64 host_rsp_dma_addr;       /* Response dma'd here */
        __le16 rsvd3;           /* Padding */
        struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
        u8  reserved[128];      /* future expansion */
 -};
 +} __packed;
  
  struct qlcnic_cardrsp_cds_ring {
        __le32 host_producer_crb;       /* Crb to use */
        __le32 interrupt_crb;   /* Crb to use */
 -};
 +} __packed;
  
  struct qlcnic_cardrsp_tx_ctx {
        __le32 host_ctx_state;  /* Starting state */
        u8  virt_port;          /* Virtual/Logical id of port */
        struct qlcnic_cardrsp_cds_ring cds_ring;        /* Card cds settings */
        u8  reserved[128];      /* future expansion */
 -};
 +} __packed;
  
  #define SIZEOF_HOSTRQ_TX(HOSTRQ_TX)   (sizeof(HOSTRQ_TX))
  #define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
@@@ -743,6 -738,40 +744,6 @@@ struct qlcnic_mac_list_s 
        uint8_t mac_addr[ETH_ALEN+2];
  };
  
 -/*
 - * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
 - * adjusted based on configured MTU.
 - */
 -#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US       3
 -#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS       256
 -#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS       64
 -#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US       4
 -
 -#define QLCNIC_INTR_DEFAULT                   0x04
 -
 -union qlcnic_nic_intr_coalesce_data {
 -      struct {
 -              u16     rx_packets;
 -              u16     rx_time_us;
 -              u16     tx_packets;
 -              u16     tx_time_us;
 -      } data;
 -      u64             word;
 -};
 -
 -struct qlcnic_nic_intr_coalesce {
 -      u16             stats_time_us;
 -      u16             rate_sample_time;
 -      u16             flags;
 -      u16             rsvd_1;
 -      u32             low_threshold;
 -      u32             high_threshold;
 -      union qlcnic_nic_intr_coalesce_data     normal;
 -      union qlcnic_nic_intr_coalesce_data     low;
 -      union qlcnic_nic_intr_coalesce_data     high;
 -      union qlcnic_nic_intr_coalesce_data     irq;
 -};
 -
  #define QLCNIC_HOST_REQUEST   0x13
  #define QLCNIC_REQUEST                0x14
  
  /*
   * Driver --> Firmware
   */
 -#define QLCNIC_H2C_OPCODE_START                       0
 -#define QLCNIC_H2C_OPCODE_CONFIG_RSS                  1
 -#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL              2
 -#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE                3
 -#define QLCNIC_H2C_OPCODE_CONFIG_LED                  4
 -#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS          5
 -#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC               6
 -#define QLCNIC_H2C_OPCODE_LRO_REQUEST                 7
 -#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS              8
 -#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST         9
 -#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST          10
 -#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU               11
 -#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE   12
 -#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST    13
 -#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST     14
 -#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST      15
 -#define QLCNIC_H2C_OPCODE_GET_NET_STATS               16
 -#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V            17
 -#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR               18
 -#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE             20
 -#define QLCNIC_H2C_OPCODE_GET_LINKEVENT               21
 -#define QLCNIC_C2C_OPCODE                             22
 -#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING             23
 -#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO               24
 -#define QLCNIC_H2C_OPCODE_LAST                                25
 +#define QLCNIC_H2C_OPCODE_CONFIG_RSS                  0x1
 +#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE                0x3
 +#define QLCNIC_H2C_OPCODE_CONFIG_LED                  0x4
 +#define QLCNIC_H2C_OPCODE_LRO_REQUEST                 0x7
 +#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE                0xc
 +#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR               0x12
 +#define QLCNIC_H2C_OPCODE_GET_LINKEVENT               0x15
 +#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING             0x17
 +#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO               0x18
  /*
   * Firmware --> Driver
   */
  
 -#define QLCNIC_C2H_OPCODE_START                       128
 -#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE         129
 -#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE     130
 -#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE         131
 -#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
 -#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE      133
 -#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE         134
 -#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE    135
 -#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS              136
 -#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY      137
 -#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY       138
 -#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
 -#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE      140
  #define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE      141
 -#define QLCNIC_C2H_OPCODE_LAST                                142
  
  #define VPORT_MISS_MODE_DROP          0 /* drop all unmatched */
  #define VPORT_MISS_MODE_ACCEPT_ALL    1 /* accept all packets */
@@@ -836,7 -895,7 +837,7 @@@ struct qlcnic_nic_req 
        __le64 qhdr;
        __le64 req_hdr;
        __le64 words[6];
 -};
 +} __packed;
  
  struct qlcnic_mac_req {
        u8 op;
  struct qlcnic_vlan_req {
        __le16 vlan_id;
        __le16 rsvd[3];
 -};
 +} __packed;
  
  struct qlcnic_ipaddr {
        __be32 ipv4;
@@@ -906,15 -965,14 +907,15 @@@ struct qlcnic_filter_hash 
  };
  
  struct qlcnic_adapter {
 -      struct qlcnic_hardware_context ahw;
 -
 +      struct qlcnic_hardware_context *ahw;
 +      struct qlcnic_recv_context *recv_ctx;
 +      struct qlcnic_host_tx_ring *tx_ring;
        struct net_device *netdev;
        struct pci_dev *pdev;
 -      struct list_head mac_list;
  
 -      spinlock_t tx_clean_lock;
 -      spinlock_t mac_learn_lock;
 +      bool            blink_was_down;
 +      unsigned long state;
 +      u32 flags;
  
        u16 num_txd;
        u16 num_rxd;
  
        u8 mc_enabled;
        u8 max_mc_count;
 -      u8 rss_supported;
        u8 fw_wait_cnt;
        u8 fw_fail_cnt;
        u8 tx_timeo_cnt;
  
        u32 fw_hal_version;
        u32 capabilities;
 -      u32 flags;
        u32 irq;
        u32 temp;
  
        u8 mac_addr[ETH_ALEN];
  
        u64 dev_rst_time;
 +      unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
  
 -      struct vlan_group *vlgrp;
        struct qlcnic_npar_info *npars;
        struct qlcnic_eswitch *eswitch;
        struct qlcnic_nic_template *nic_ops;
  
        struct qlcnic_adapter_stats stats;
 -
 -      struct qlcnic_recv_context recv_ctx;
 -      struct qlcnic_host_tx_ring *tx_ring;
 +      struct list_head mac_list;
  
        void __iomem    *tgt_mask_reg;
        void __iomem    *tgt_status_reg;
  
        struct delayed_work fw_work;
  
 -      struct qlcnic_nic_intr_coalesce coal;
  
        struct qlcnic_filter_hash fhash;
  
 -      unsigned long state;
 +      spinlock_t tx_clean_lock;
 +      spinlock_t mac_learn_lock;
        __le32 file_prd_off;    /*File fw product offset*/
        u32 fw_version;
        const struct firmware *fw;
@@@ -1017,7 -1079,7 +1018,7 @@@ struct qlcnic_info 
        __le16  min_tx_bw;
        __le16  max_tx_bw;
        u8      reserved2[104];
 -};
 +} __packed;
  
  struct qlcnic_pci_info {
        __le16  id; /* pci function id */
  
        u8      mac[ETH_ALEN];
        u8      reserved2[106];
 -};
 +} __packed;
  
  struct qlcnic_npar_info {
        u16     pvid;
@@@ -1148,7 -1210,7 +1149,7 @@@ struct __qlcnic_esw_statistics 
        __le64 local_frames;
        __le64 numbytes;
        __le64 rsvd[3];
 -};
 +} __packed;
  
  struct qlcnic_esw_statistics {
        struct __qlcnic_esw_statistics rx;
@@@ -1232,7 -1294,7 +1233,7 @@@ void qlcnic_release_tx_buffers(struct q
  
  int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
  void qlcnic_watchdog_task(struct work_struct *work);
 -void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
 +void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
                struct qlcnic_host_rds_ring *rds_ring);
  int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
  void qlcnic_set_multi(struct net_device *netdev);
@@@ -1317,7 -1379,8 +1318,7 @@@ static const struct qlcnic_brdinfo qlcn
  
  static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
  {
 -      smp_mb();
 -      if (tx_ring->producer < tx_ring->sw_consumer)
 +      if (likely(tx_ring->producer < tx_ring->sw_consumer))
                return tx_ring->sw_consumer - tx_ring->producer;
        else
                return tx_ring->sw_consumer + tx_ring->num_desc -
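
A recurring change in the qlcnic.h hunks above is marking the host/firmware request and response structures __packed (the kernel's shorthand for __attribute__((packed))), so their in-memory layout matches the adapter firmware's fixed format with no compiler-inserted padding. A standalone illustration of the effect, using made-up fields rather than the driver's:

#include <stdio.h>
#include <stdint.h>

struct natural {                /* compiler may pad before the 64-bit field */
        uint16_t version;
        uint16_t len;
        uint64_t addr;
};

struct wire {                   /* packed: byte-exact, no padding */
        uint16_t version;
        uint16_t len;
        uint64_t addr;
} __attribute__((packed));

int main(void)
{
        printf("natural: %zu bytes\n", sizeof(struct natural)); /* typically 16 */
        printf("packed:  %zu bytes\n", sizeof(struct wire));    /* 12 */
        return 0;
}
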
diff --combined drivers/net/qlcnic/qlcnic_main.c
index 7f9edb2f14748d705de6d80e63344efe66ff5594,cb1a1ef36c0ade56ddb5dfb98c2ef2ce8813b100..e9e9ba6efc5f5e9209916643bf71ca781c1c6f7e
@@@ -13,6 -13,7 +13,6 @@@
  
  #include <linux/swab.h>
  #include <linux/dma-mapping.h>
 -#include <linux/if_vlan.h>
  #include <net/ip.h>
  #include <linux/ipv6.h>
  #include <linux/inetdevice.h>
@@@ -97,9 -98,6 +97,9 @@@ static int qlcnicvf_config_bridged_mode
  static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
  static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
 +static void qlcnic_vlan_rx_add(struct net_device *, u16);
 +static void qlcnic_vlan_rx_del(struct net_device *, u16);
 +
  /*  PCI Device ID Table  */
  #define ENTRY(device) \
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@@ -115,7 -113,7 +115,7 @@@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_p
  MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
  
  
 -void
 +inline void
  qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
                struct qlcnic_host_tx_ring *tx_ring)
  {
@@@ -171,7 -169,7 +171,7 @@@ qlcnic_napi_add(struct qlcnic_adapter *
  {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
 -      struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
                return -ENOMEM;
@@@ -195,14 -193,14 +195,14 @@@ qlcnic_napi_del(struct qlcnic_adapter *
  {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
 -      struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                netif_napi_del(&sds_ring->napi);
        }
  
 -      qlcnic_free_sds_rings(&adapter->recv_ctx);
 +      qlcnic_free_sds_rings(adapter->recv_ctx);
  }
  
  static void
@@@ -210,7 -208,7 +210,7 @@@ qlcnic_napi_enable(struct qlcnic_adapte
  {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
 -      struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;
@@@ -227,7 -225,7 +227,7 @@@ qlcnic_napi_disable(struct qlcnic_adapt
  {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
 -      struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;
@@@ -319,6 -317,13 +319,6 @@@ static int qlcnic_set_mac(struct net_de
        return 0;
  }
  
 -static void qlcnic_vlan_rx_register(struct net_device *netdev,
 -              struct vlan_group *grp)
 -{
 -      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 -      adapter->vlgrp = grp;
 -}
 -
  static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_open          = qlcnic_open,
        .ndo_stop          = qlcnic_close,
        .ndo_set_mac_address    = qlcnic_set_mac,
        .ndo_change_mtu    = qlcnic_change_mtu,
        .ndo_tx_timeout    = qlcnic_tx_timeout,
 -      .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
 +      .ndo_vlan_rx_add_vid    = qlcnic_vlan_rx_add,
 +      .ndo_vlan_rx_kill_vid   = qlcnic_vlan_rx_del,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = qlcnic_poll_controller,
  #endif
@@@ -355,7 -359,7 +355,7 @@@ qlcnic_setup_intr(struct qlcnic_adapte
        struct pci_dev *pdev = adapter->pdev;
        int err, num_msix;
  
 -      if (adapter->rss_supported) {
 +      if (adapter->msix_supported) {
                num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
                        MSIX_ENTRIES_PER_ADAPTER : 2;
        } else
  
        adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
  
 -      legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
 +      legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
  
        adapter->int_vec_bit = legacy_intrp->int_vec_bit;
        adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
                        adapter->flags |= QLCNIC_MSIX_ENABLED;
                        qlcnic_set_msix_bit(pdev, 1);
  
 -                      if (adapter->rss_supported)
 -                              adapter->max_sds_rings = num_msix;
 +                      adapter->max_sds_rings = num_msix;
  
                        dev_info(&pdev->dev, "using msi-x interrupts\n");
                        return;
        if (use_msi && !pci_enable_msi(pdev)) {
                adapter->flags |= QLCNIC_MSI_ENABLED;
                adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
 -                              msi_tgt_status[adapter->ahw.pci_func]);
 +                              msi_tgt_status[adapter->ahw->pci_func]);
                dev_info(&pdev->dev, "using msi interrupts\n");
                adapter->msix_entries[0].vector = pdev->irq;
                return;
@@@ -424,8 -429,8 +424,8 @@@ qlcnic_teardown_intr(struct qlcnic_adap
  static void
  qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
  {
 -      if (adapter->ahw.pci_base0 != NULL)
 -              iounmap(adapter->ahw.pci_base0);
 +      if (adapter->ahw->pci_base0 != NULL)
 +              iounmap(adapter->ahw->pci_base0);
  }
  
  static int
@@@ -459,10 -464,8 +459,10 @@@ qlcnic_init_pci_info(struct qlcnic_adap
  
        for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
                pfn = pci_info[i].id;
 -              if (pfn > QLCNIC_MAX_PCI_FUNC)
 -                      return QL_STATUS_INVALID_PARAM;
 +              if (pfn > QLCNIC_MAX_PCI_FUNC) {
 +                      ret = QL_STATUS_INVALID_PARAM;
 +                      goto err_eswitch;
 +              }
                adapter->npars[pfn].active = (u8)pci_info[i].active;
                adapter->npars[pfn].type = (u8)pci_info[i].type;
                adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
@@@ -495,7 -498,7 +495,7 @@@ qlcnic_set_function_modes(struct qlcnic
        u32 ref_count;
        int i, ret = 1;
        u32 data = QLCNIC_MGMT_FUNC;
 -      void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
 +      void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
  
        /* If other drivers are not in use set their privilege level */
        ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
                for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
                        id = i;
                        if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
 -                              id == adapter->ahw.pci_func)
 +                              id == adapter->ahw->pci_func)
                                continue;
                        data |= (qlcnic_config_npars &
                                        QLC_DEV_SET_DRV(0xf, id));
                }
        } else {
                data = readl(priv_op);
 -              data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
 +              data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
                        (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
 -                      adapter->ahw.pci_func));
 +                      adapter->ahw->pci_func));
        }
        writel(data, priv_op);
        qlcnic_api_unlock(adapter);
@@@ -534,23 -537,22 +534,23 @@@ qlcnic_check_vf(struct qlcnic_adapter *
        u32 op_mode, priv_level;
  
        /* Determine FW API version */
 -      adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
 +      adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
 +                                      QLCNIC_FW_API);
  
        /* Find PCI function number */
        pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
 -      msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
 +      msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
        msix_base = readl(msix_base_addr);
        func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
 -      adapter->ahw.pci_func = func;
 +      adapter->ahw->pci_func = func;
  
        /* Determine function privilege level */
 -      priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
 +      priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
        op_mode = readl(priv_op);
        if (op_mode == QLC_DEV_DRV_DEFAULT)
                priv_level = QLCNIC_MGMT_FUNC;
        else
 -              priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
 +              priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
  
        if (priv_level == QLCNIC_NON_PRIV_FUNC) {
                adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
@@@ -589,14 -591,13 +589,14 @@@ qlcnic_setup_pci_map(struct qlcnic_adap
  
        dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
  
 -      adapter->ahw.pci_base0 = mem_ptr0;
 -      adapter->ahw.pci_len0 = pci_len0;
 +      adapter->ahw->pci_base0 = mem_ptr0;
 +      adapter->ahw->pci_len0 = pci_len0;
  
        qlcnic_check_vf(adapter);
  
 -      adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
 -              QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
 +      adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
 +              QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
 +                      adapter->ahw->pci_func)));
  
        return 0;
  }
@@@ -638,7 -639,7 +638,7 @@@ qlcnic_check_options(struct qlcnic_adap
  
        dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
                        fw_major, fw_minor, fw_build);
 -      if (adapter->ahw.port_type == QLCNIC_XGBE) {
 +      if (adapter->ahw->port_type == QLCNIC_XGBE) {
                if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
                        adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
                        adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
                adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
                adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
  
 -      } else if (adapter->ahw.port_type == QLCNIC_GBE) {
 +      } else if (adapter->ahw->port_type == QLCNIC_GBE) {
                adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
                adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
                adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
        }
  
        adapter->msix_supported = !!use_msi_x;
 -      adapter->rss_supported = !!use_msi_x;
  
        adapter->num_txd = MAX_CMD_DESCRIPTORS;
  
@@@ -670,7 -672,7 +670,7 @@@ qlcnic_initialize_nic(struct qlcnic_ada
        int err;
        struct qlcnic_info nic_info;
  
 -      err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
 +      err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
        if (err)
                return err;
  
@@@ -705,22 -707,6 +705,22 @@@ qlcnic_set_vlan_config(struct qlcnic_ad
                adapter->pvid = 0;
  }
  
 +static void
 +qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
 +{
 +      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 +      set_bit(vid, adapter->vlans);
 +}
 +
 +static void
 +qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
 +{
 +      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 +
 +      qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
 +      clear_bit(vid, adapter->vlans);
 +}
 +
  static void
  qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
                struct qlcnic_esw_func_cfg *esw_cfg)
@@@ -748,7 -734,7 +748,7 @@@ qlcnic_set_eswitch_port_config(struct q
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
                return 0;
  
 -      esw_cfg.pci_func = adapter->ahw.pci_func;
 +      esw_cfg.pci_func = adapter->ahw->pci_func;
        if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
                        return -EIO;
        qlcnic_set_vlan_config(adapter, &esw_cfg);
@@@ -767,14 -753,13 +767,14 @@@ qlcnic_set_netdev_features(struct qlcni
        features = (NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_IPV6_CSUM | NETIF_F_GRO);
        vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
 -                      NETIF_F_IPV6_CSUM);
 +                      NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
  
        if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
                features |= (NETIF_F_TSO | NETIF_F_TSO6);
                vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
        }
 -      if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
 +
 +      if (netdev->features & NETIF_F_LRO)
                features |= NETIF_F_LRO;
  
        if (esw_cfg->offload_flags & BIT_0) {
@@@ -806,14 -791,14 +806,14 @@@ qlcnic_check_eswitch_mode(struct qlcnic
        if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
                return 0;
  
 -      priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
 +      priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
        op_mode = readl(priv_op);
 -      priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
 +      priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
  
        if (op_mode == QLC_DEV_DRV_DEFAULT)
                priv_level = QLCNIC_MGMT_FUNC;
        else
 -              priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
 +              priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
  
        if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
                if (priv_level == QLCNIC_MGMT_FUNC) {
@@@ -1053,7 -1038,7 +1053,7 @@@ qlcnic_request_irq(struct qlcnic_adapte
  
        unsigned long flags = 0;
        struct net_device *netdev = adapter->netdev;
 -      struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
                handler = qlcnic_tmp_intr;
@@@ -1090,7 -1075,7 +1090,7 @@@ qlcnic_free_irq(struct qlcnic_adapter *
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
  
 -      struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
        }
  }
  
 -static void
 -qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
 -{
 -      adapter->coal.flags = QLCNIC_INTR_DEFAULT;
 -      adapter->coal.normal.data.rx_time_us =
 -              QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
 -      adapter->coal.normal.data.rx_packets =
 -              QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
 -      adapter->coal.normal.data.tx_time_us =
 -              QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
 -      adapter->coal.normal.data.tx_packets =
 -              QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
 -}
 -
  static int
  __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
  {
                return -EIO;
  
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
 -              rds_ring = &adapter->recv_ctx.rds_rings[ring];
 -              qlcnic_post_rx_buffers(adapter, ring, rds_ring);
 +              rds_ring = &adapter->recv_ctx->rds_rings[ring];
 +              qlcnic_post_rx_buffers(adapter, rds_ring);
        }
  
        qlcnic_set_multi(netdev);
        qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
  
 -      adapter->ahw.linkup = 0;
 +      adapter->ahw->linkup = 0;
  
        if (adapter->max_sds_rings > 1)
                qlcnic_config_rss(adapter, 1);
@@@ -1231,6 -1230,8 +1231,6 @@@ qlcnic_attach(struct qlcnic_adapter *ad
                goto err_out_free_hw;
        }
  
 -      qlcnic_init_coalesce_defaults(adapter);
 -
        qlcnic_create_sysfs_entries(adapter);
  
        adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
@@@ -1271,7 -1272,7 +1271,7 @@@ void qlcnic_diag_free_res(struct net_de
        clear_bit(__QLCNIC_DEV_UP, &adapter->state);
        if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 -                      sds_ring = &adapter->recv_ctx.sds_rings[ring];
 +                      sds_ring = &adapter->recv_ctx->sds_rings[ring];
                        qlcnic_disable_int(sds_ring);
                }
        }
        netif_device_attach(netdev);
  }
  
 +static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
 +{
 +      int err = 0;
 +      adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
 +                              GFP_KERNEL);
 +      if (!adapter->ahw) {
 +              dev_err(&adapter->pdev->dev,
 +                      "Failed to allocate recv ctx resources for adapter\n");
 +              err = -ENOMEM;
 +              goto err_out;
 +      }
 +      adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
 +                              GFP_KERNEL);
 +      if (!adapter->recv_ctx) {
 +              dev_err(&adapter->pdev->dev,
 +                      "Failed to allocate recv ctx resources for adapter\n");
 +              kfree(adapter->ahw);
 +              adapter->ahw = NULL;
 +              err = -ENOMEM;
 +              goto err_out;
 +      }
 +      /* Initialize interrupt coalesce parameters */
 +      adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
 +      adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
 +      adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
 +err_out:
 +      return err;
 +}
 +
 +static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
 +{
 +      kfree(adapter->recv_ctx);
 +      adapter->recv_ctx = NULL;
 +
 +      kfree(adapter->ahw);
 +      adapter->ahw = NULL;
 +}
 +
  int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
  {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        }
  
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
 -              rds_ring = &adapter->recv_ctx.rds_rings[ring];
 -              qlcnic_post_rx_buffers(adapter, ring, rds_ring);
 +              rds_ring = &adapter->recv_ctx->rds_rings[ring];
 +              qlcnic_post_rx_buffers(adapter, rds_ring);
        }
  
        if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 -                      sds_ring = &adapter->recv_ctx.sds_rings[ring];
 +                      sds_ring = &adapter->recv_ctx->sds_rings[ring];
                        qlcnic_enable_int(sds_ring);
                }
        }
@@@ -1450,7 -1413,7 +1450,7 @@@ qlcnic_setup_netdev(struct qlcnic_adapt
        netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
        netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
 -              NETIF_F_IPV6_CSUM);
 +              NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
  
        if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
                netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
@@@ -1538,26 -1501,23 +1538,26 @@@ qlcnic_probe(struct pci_dev *pdev, cons
        adapter = netdev_priv(netdev);
        adapter->netdev  = netdev;
        adapter->pdev    = pdev;
 -      adapter->dev_rst_time = jiffies;
  
 +      if (qlcnic_alloc_adapter_resources(adapter))
 +              goto err_out_free_netdev;
 +
 +      adapter->dev_rst_time = jiffies;
        revision_id = pdev->revision;
 -      adapter->ahw.revision_id = revision_id;
 +      adapter->ahw->revision_id = revision_id;
  
 -      rwlock_init(&adapter->ahw.crb_lock);
 -      mutex_init(&adapter->ahw.mem_lock);
 +      rwlock_init(&adapter->ahw->crb_lock);
 +      mutex_init(&adapter->ahw->mem_lock);
  
        spin_lock_init(&adapter->tx_clean_lock);
        INIT_LIST_HEAD(&adapter->mac_list);
  
        err = qlcnic_setup_pci_map(adapter);
        if (err)
 -              goto err_out_free_netdev;
 +              goto err_out_free_hw;
  
        /* This will be reset for mezz cards  */
 -      adapter->portnum = adapter->ahw.pci_func;
 +      adapter->portnum = adapter->ahw->pci_func;
  
        err = qlcnic_get_board_info(adapter);
        if (err) {
  
                pr_info("%s: %s Board Chip rev 0x%x\n",
                                module_name(THIS_MODULE),
 -                              brd_name, adapter->ahw.revision_id);
 +                              brd_name, adapter->ahw->revision_id);
        }
  
        qlcnic_clear_stats(adapter);
  
        qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
  
 -      switch (adapter->ahw.port_type) {
 +      switch (adapter->ahw->port_type) {
        case QLCNIC_GBE:
                dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
                                adapter->netdev->name);
@@@ -1625,9 -1585,6 +1625,9 @@@ err_out_decr_ref
  err_out_iounmap:
        qlcnic_cleanup_pci_map(adapter);
  
 +err_out_free_hw:
 +      qlcnic_free_adapter_resources(adapter);
 +
  err_out_free_netdev:
        free_netdev(netdev);
  
@@@ -1681,7 -1638,6 +1681,7 @@@ static void __devexit qlcnic_remove(str
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
  
 +      qlcnic_free_adapter_resources(adapter);
        free_netdev(netdev);
  }
  static int __qlcnic_shutdown(struct pci_dev *pdev)
@@@ -1863,7 -1819,6 +1863,7 @@@ static void qlcnic_change_filter(struc
        vlan_req->vlan_id = vlan_id;
  
        tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
 +      smp_mb();
  }
  
  #define QLCNIC_MAC_HASH(MAC)\
@@@ -1924,122 -1879,58 +1924,122 @@@ qlcnic_send_filter(struct qlcnic_adapte
        spin_unlock(&adapter->mac_learn_lock);
  }
  
 -static void
 -qlcnic_tso_check(struct net_device *netdev,
 -              struct qlcnic_host_tx_ring *tx_ring,
 +static int
 +qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                struct cmd_desc_type0 *first_desc,
                struct sk_buff *skb)
  {
 -      u8 opcode = TX_ETHER_PKT;
 -      __be16 protocol = skb->protocol;
 -      u16 flags = 0;
 -      int copied, offset, copy_len, hdr_len = 0, tso = 0;
 +      u8 opcode = 0, hdr_len = 0;
 +      u16 flags = 0, vlan_tci = 0;
 +      int copied, offset, copy_len;
        struct cmd_desc_type0 *hwdesc;
        struct vlan_ethhdr *vh;
 -      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 +      struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 +      u16 protocol = ntohs(skb->protocol);
        u32 producer = tx_ring->producer;
 -      __le16 vlan_oob = first_desc->flags_opcode &
 -                              cpu_to_le16(FLAGS_VLAN_OOB);
 +
 +      if (protocol == ETH_P_8021Q) {
 +              vh = (struct vlan_ethhdr *)skb->data;
 +              flags = FLAGS_VLAN_TAGGED;
 +              vlan_tci = vh->h_vlan_TCI;
 +      } else if (vlan_tx_tag_present(skb)) {
 +              flags = FLAGS_VLAN_OOB;
 +              vlan_tci = vlan_tx_tag_get(skb);
 +      }
 +      if (unlikely(adapter->pvid)) {
 +              if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
 +                      return -EIO;
 +              if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
 +                      goto set_flags;
 +
 +              flags = FLAGS_VLAN_OOB;
 +              vlan_tci = adapter->pvid;
 +      }
 +set_flags:
 +      qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
 +      qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
  
        if (*(skb->data) & BIT_0) {
                flags |= BIT_0;
                memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
        }
 -
 -      if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
 +      opcode = TX_ETHER_PKT;
 +      if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
                        skb_shinfo(skb)->gso_size > 0) {
  
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
  
                first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
                first_desc->total_hdr_length = hdr_len;
 -              if (vlan_oob) {
 +
 +              opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
 +
 +              /* For LSO, we need to copy the MAC/IP/TCP headers into
 +              * the descriptor ring */
 +              copied = 0;
 +              offset = 2;
 +
 +              if (flags & FLAGS_VLAN_OOB) {
                        first_desc->total_hdr_length += VLAN_HLEN;
                        first_desc->tcp_hdr_offset = VLAN_HLEN;
                        first_desc->ip_hdr_offset = VLAN_HLEN;
                        /* Only in case of TSO on vlan device */
                        flags |= FLAGS_VLAN_TAGGED;
 +
 +                      /* Create a TSO vlan header template for firmware */
 +
 +                      hwdesc = &tx_ring->desc_head[producer];
 +                      tx_ring->cmd_buf_arr[producer].skb = NULL;
 +
 +                      copy_len = min((int)sizeof(struct cmd_desc_type0) -
 +                              offset, hdr_len + VLAN_HLEN);
 +
 +                      vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
 +                      skb_copy_from_linear_data(skb, vh, 12);
 +                      vh->h_vlan_proto = htons(ETH_P_8021Q);
 +                      vh->h_vlan_TCI = htons(vlan_tci);
 +
 +                      skb_copy_from_linear_data_offset(skb, 12,
 +                              (char *)vh + 16, copy_len - 16);
 +
 +                      copied = copy_len - VLAN_HLEN;
 +                      offset = 0;
 +
 +                      producer = get_next_index(producer, tx_ring->num_desc);
                }
  
 -              opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
 -                              TX_TCP_LSO6 : TX_TCP_LSO;
 -              tso = 1;
 +              while (copied < hdr_len) {
 +
 +                      copy_len = min((int)sizeof(struct cmd_desc_type0) -
 +                              offset, (hdr_len - copied));
 +
 +                      hwdesc = &tx_ring->desc_head[producer];
 +                      tx_ring->cmd_buf_arr[producer].skb = NULL;
 +
 +                      skb_copy_from_linear_data_offset(skb, copied,
 +                               (char *) hwdesc + offset, copy_len);
 +
 +                      copied += copy_len;
 +                      offset = 0;
 +
 +                      producer = get_next_index(producer, tx_ring->num_desc);
 +              }
 +
 +              tx_ring->producer = producer;
 +              smp_mb();
 +              adapter->stats.lso_frames++;
  
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4proto;
  
 -              if (protocol == cpu_to_be16(ETH_P_IP)) {
 +              if (protocol == ETH_P_IP) {
                        l4proto = ip_hdr(skb)->protocol;
  
                        if (l4proto == IPPROTO_TCP)
                                opcode = TX_TCP_PKT;
                        else if (l4proto == IPPROTO_UDP)
                                opcode = TX_UDP_PKT;
 -              } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
 +              } else if (protocol == ETH_P_IPV6) {
                        l4proto = ipv6_hdr(skb)->nexthdr;
  
                        if (l4proto == IPPROTO_TCP)
                                opcode = TX_TCPV6_PKT;
                        else if (l4proto == IPPROTO_UDP)
                                opcode = TX_UDPV6_PKT;
                }
        }
 -
        first_desc->tcp_hdr_offset += skb_transport_offset(skb);
        first_desc->ip_hdr_offset += skb_network_offset(skb);
        qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
  
 -      if (!tso)
 -              return;
 -
 -      /* For LSO, we need to copy the MAC/IP/TCP headers into
 -       * the descriptor ring
 -       */
 -      copied = 0;
 -      offset = 2;
 -
 -      if (vlan_oob) {
 -              /* Create a TSO vlan header template for firmware */
 -
 -              hwdesc = &tx_ring->desc_head[producer];
 -              tx_ring->cmd_buf_arr[producer].skb = NULL;
 -
 -              copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
 -                              hdr_len + VLAN_HLEN);
 -
 -              vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
 -              skb_copy_from_linear_data(skb, vh, 12);
 -              vh->h_vlan_proto = htons(ETH_P_8021Q);
 -              vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
 -
 -              skb_copy_from_linear_data_offset(skb, 12,
 -                              (char *)vh + 16, copy_len - 16);
 -
 -              copied = copy_len - VLAN_HLEN;
 -              offset = 0;
 -
 -              producer = get_next_index(producer, tx_ring->num_desc);
 -      }
 -
 -      while (copied < hdr_len) {
 -
 -              copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
 -                              (hdr_len - copied));
 -
 -              hwdesc = &tx_ring->desc_head[producer];
 -              tx_ring->cmd_buf_arr[producer].skb = NULL;
 -
 -              skb_copy_from_linear_data_offset(skb, copied,
 -                               (char *)hwdesc + offset, copy_len);
 -
 -              copied += copy_len;
 -              offset = 0;
 -
 -              producer = get_next_index(producer, tx_ring->num_desc);
 -      }
 -
 -      tx_ring->producer = producer;
 -      barrier();
 -      adapter->stats.lso_frames++;
 +      return 0;
  }
  
  static int
@@@ -2103,21 -2046,39 +2103,21 @@@ out_err
        return -ENOMEM;
  }
  
 -static int
 -qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
 -                      struct sk_buff *skb,
 -                      struct cmd_desc_type0 *first_desc)
 +static void
 +qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
 +                      struct qlcnic_cmd_buffer *pbuf)
  {
 -      u8 opcode = 0;
 -      u16 flags = 0;
 -      __be16 protocol = skb->protocol;
 -      struct vlan_ethhdr *vh;
 +      struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
 +      int nr_frags = skb_shinfo(skb)->nr_frags;
 +      int i;
  
 -      if (protocol == cpu_to_be16(ETH_P_8021Q)) {
 -              vh = (struct vlan_ethhdr *)skb->data;
 -              protocol = vh->h_vlan_encapsulated_proto;
 -              flags = FLAGS_VLAN_TAGGED;
 -              qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
 -      } else if (vlan_tx_tag_present(skb)) {
 -              flags = FLAGS_VLAN_OOB;
 -              qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
 +      for (i = 0; i < nr_frags; i++) {
 +              nf = &pbuf->frag_array[i+1];
 +              pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
        }
 -      if (unlikely(adapter->pvid)) {
 -              if (first_desc->vlan_TCI &&
 -                              !(adapter->flags & QLCNIC_TAGGING_ENABLED))
 -                      return -EIO;
 -              if (first_desc->vlan_TCI &&
 -                              (adapter->flags & QLCNIC_TAGGING_ENABLED))
 -                      goto set_flags;
  
 -              flags = FLAGS_VLAN_OOB;
 -              qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
 -      }
 -set_flags:
 -      qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
 -      return 0;
 +      nf = &pbuf->frag_array[0];
 +      pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
  }
  
  static inline void
@@@ -2138,10 -2099,11 +2138,11 @@@ qlcnic_xmit_frame(struct sk_buff *skb, 
        struct cmd_desc_type0 *hwdesc, *first_desc;
        struct pci_dev *pdev;
        struct ethhdr *phdr;
+       int delta = 0;
        int i, k;
  
        u32 producer;
 -      int frag_count, no_of_desc;
 +      int frag_count;
        u32 num_txd = tx_ring->num_desc;
  
        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
        }
  
        frag_count = skb_shinfo(skb)->nr_frags + 1;
+       /* 14 frags supported for normal packet and
+        * 32 frags supported for TSO packet
+        */
+       if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+               for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+                       delta += skb_shinfo(skb)->frags[i].size;
+               if (!__pskb_pull_tail(skb, delta))
+                       goto drop_packet;
+               frag_count = 1 + skb_shinfo(skb)->nr_frags;
+       }
  
 -      /* 4 fragments per cmd des */
 -      no_of_desc = (frag_count + 3) >> 2;
 -
        if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
                netif_stop_queue(netdev);
 -              smp_mb();
                if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
                        netif_start_queue(netdev);
                else {
        first_desc = hwdesc = &tx_ring->desc_head[producer];
        qlcnic_clear_cmddesc((u64 *)hwdesc);
  
 -      if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
 -              goto drop_packet;
 -
        if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
                adapter->stats.tx_dma_map_error++;
                goto drop_packet;
        }
  
        tx_ring->producer = get_next_index(producer, num_txd);
 +      smp_mb();
  
 -      qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
 +      if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
 +              goto unwind_buff;
  
        if (qlcnic_mac_learn)
                qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
  
        return NETDEV_TX_OK;
  
 +unwind_buff:
 +      qlcnic_unmap_buffers(pdev, skb, pbuf);
  drop_packet:
        adapter->stats.txdropped++;
        dev_kfree_skb_any(skb);
@@@ -2282,16 -2260,16 +2296,16 @@@ void qlcnic_advert_link_change(struct q
  {
        struct net_device *netdev = adapter->netdev;
  
 -      if (adapter->ahw.linkup && !linkup) {
 +      if (adapter->ahw->linkup && !linkup) {
                netdev_info(netdev, "NIC Link is down\n");
 -              adapter->ahw.linkup = 0;
 +              adapter->ahw->linkup = 0;
                if (netif_running(netdev)) {
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                }
 -      } else if (!adapter->ahw.linkup && linkup) {
 +      } else if (!adapter->ahw->linkup && linkup) {
                netdev_info(netdev, "NIC Link is up\n");
 -              adapter->ahw.linkup = 1;
 +              adapter->ahw->linkup = 1;
                if (netif_running(netdev)) {
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
@@@ -2527,7 -2505,7 +2541,7 @@@ static void qlcnic_poll_controller(stru
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 -      struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
 +      struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        disable_irq(adapter->irq);
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@@ -3537,7 -3515,7 +3551,7 @@@ validate_esw_config(struct qlcnic_adapt
        u8 pci_func;
        int i;
  
 -      op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
 +      op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
  
        for (i = 0; i < count; i++) {
                pci_func = esw_cfg[i].pci_func;
@@@ -3603,13 -3581,13 +3617,13 @@@ qlcnic_sysfs_write_esw_config(struct fi
                        if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
                                return QL_STATUS_INVALID_PARAM;
  
 -              if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
 +              if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
                        continue;
  
                op_mode = esw_cfg[i].op_mode;
                qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
                esw_cfg[i].op_mode = op_mode;
 -              esw_cfg[i].pci_func = adapter->ahw.pci_func;
 +              esw_cfg[i].pci_func = adapter->ahw->pci_func;
  
                switch (esw_cfg[i].op_mode) {
                case QLCNIC_PORT_DEFAULTS:
@@@ -3990,14 -3968,14 +4004,14 @@@ qlcnic_create_diag_entries(struct qlcni
                dev_info(dev, "failed to create crb sysfs entry\n");
        if (device_create_bin_file(dev, &bin_attr_mem))
                dev_info(dev, "failed to create mem sysfs entry\n");
 +      if (device_create_bin_file(dev, &bin_attr_pci_config))
 +              dev_info(dev, "failed to create pci config sysfs entry");
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
                return;
        if (device_create_bin_file(dev, &bin_attr_esw_config))
                dev_info(dev, "failed to create esw config sysfs entry");
        if (adapter->op_mode != QLCNIC_MGMT_FUNC)
                return;
 -      if (device_create_bin_file(dev, &bin_attr_pci_config))
 -              dev_info(dev, "failed to create pci config sysfs entry");
        if (device_create_bin_file(dev, &bin_attr_npar_config))
                dev_info(dev, "failed to create npar config sysfs entry");
        if (device_create_bin_file(dev, &bin_attr_pm_config))
@@@ -4018,12 -3996,12 +4032,12 @@@ qlcnic_remove_diag_entries(struct qlcni
        device_remove_file(dev, &dev_attr_diag_mode);
        device_remove_bin_file(dev, &bin_attr_crb);
        device_remove_bin_file(dev, &bin_attr_mem);
 +      device_remove_bin_file(dev, &bin_attr_pci_config);
        if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
                return;
        device_remove_bin_file(dev, &bin_attr_esw_config);
        if (adapter->op_mode != QLCNIC_MGMT_FUNC)
                return;
 -      device_remove_bin_file(dev, &bin_attr_pci_config);
        device_remove_bin_file(dev, &bin_attr_npar_config);
        device_remove_bin_file(dev, &bin_attr_pm_config);
        device_remove_bin_file(dev, &bin_attr_esw_stats);
@@@ -4070,10 -4048,14 +4084,10 @@@ qlcnic_restore_indev_addr(struct net_de
  
        qlcnic_config_indev_addr(adapter, netdev, event);
  
 -      if (!adapter->vlgrp)
 -              return;
 -
 -      for (vid = 0; vid < VLAN_N_VID; vid++) {
 -              dev = vlan_group_get_device(adapter->vlgrp, vid);
 +      for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
 +              dev = vlan_find_dev(netdev, vid);
                if (!dev)
                        continue;
 -
                qlcnic_config_indev_addr(adapter, dev, event);
        }
  }
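
The for_each_set_bit() walk above assumes the driver now records VLAN membership in a bitmap (adapter->vlans) instead of a vlan_group. A hedged sketch of how such a bitmap is typically maintained from the VLAN add/kill callbacks; the callback names here are illustrative, not necessarily the exact qlcnic ones:

	/* The adapter struct is assumed to carry DECLARE_BITMAP(vlans, VLAN_N_VID);
	 * the callbacks only flip bits so qlcnic_restore_indev_addr() can later
	 * iterate them with for_each_set_bit(). */
	static void example_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
	{
		struct qlcnic_adapter *adapter = netdev_priv(netdev);

		set_bit(vid, adapter->vlans);
	}

	static void example_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
	{
		struct qlcnic_adapter *adapter = netdev_priv(netdev);

		clear_bit(vid, adapter->vlans);
	}
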
diff --combined drivers/net/sfc/efx.c
index c8871b2db38cd147b43278fa68d5c57125a7dfdf,a3c2aab53de8d29407e11386e51c7fefcd00c402..38a55e9e89e424a9cd233d2cf13357074de65337
@@@ -328,7 -328,8 +328,8 @@@ static int efx_poll(struct napi_struct 
   * processing to finish, then directly poll (and ack) the eventq.
   * Finally reenable NAPI and interrupts.
   *
-  * Since we are touching interrupts the caller should hold the suspend lock
+  * This is for use only during a loopback self-test.  It must not
+  * deliver any packets up the stack as this can result in deadlock.
   */
  void efx_process_channel_now(struct efx_channel *channel)
  {
  
        BUG_ON(channel->channel >= efx->n_channels);
        BUG_ON(!channel->enabled);
+       BUG_ON(!efx->loopback_selftest);
  
        /* Disable interrupts and wait for ISRs to complete */
        efx_nic_disable_interrupts(efx);
@@@ -1436,7 -1438,7 +1438,7 @@@ static void efx_start_all(struct efx_ni
         * restart the transmit interface early so the watchdog timer stops */
        efx_start_port(efx);
  
-       if (efx_dev_registered(efx))
+       if (efx_dev_registered(efx) && !efx->port_inhibited)
                netif_tx_wake_all_queues(efx->net_dev);
  
        efx_for_each_channel(channel, efx)
@@@ -1874,17 -1876,6 +1876,17 @@@ static void efx_set_multicast_list(stru
        /* Otherwise efx_start_port() will do this */
  }
  
 +static int efx_set_features(struct net_device *net_dev, u32 data)
 +{
 +      struct efx_nic *efx = netdev_priv(net_dev);
 +
 +      /* If disabling RX n-tuple filtering, clear existing filters */
 +      if (net_dev->features & ~data & NETIF_F_NTUPLE)
 +              efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
 +
 +      return 0;
 +}
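
efx_set_features() is hooked up through the new ndo_set_features callback; together with the hw_features mask set in efx_pci_probe() further down, it lets ethtool toggle offloads generically while the driver only reacts to the transitions it cares about (here, flushing manual RX n-tuple filters when NETIF_F_NTUPLE is cleared). A minimal sketch of the same pattern for a hypothetical driver:

	/* Illustrative only: the core validates and commits net_dev->features;
	 * the driver just reprograms hardware for the bits that changed. */
	static int example_set_features(struct net_device *net_dev, u32 data)
	{
		u32 changed = net_dev->features ^ data;

		if (changed & NETIF_F_RXCSUM)
			; /* reprogram the NIC's RX checksum engine here */

		return 0;
	}
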
 +
  static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
        .ndo_change_mtu         = efx_change_mtu,
        .ndo_set_mac_address    = efx_set_mac_address,
        .ndo_set_multicast_list = efx_set_multicast_list,
 +      .ndo_set_features       = efx_set_features,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = efx_netpoll,
  #endif
@@@ -2245,7 -2235,7 +2247,7 @@@ static bool efx_port_dummy_op_poll(stru
        return false;
  }
  
 -static struct efx_phy_operations efx_dummy_phy_operations = {
 +static const struct efx_phy_operations efx_dummy_phy_operations = {
        .init            = efx_port_dummy_op_int,
        .reconfigure     = efx_port_dummy_op_int,
        .poll            = efx_port_dummy_op_poll,
  /* This zeroes out and then fills in the invariants in a struct
   * efx_nic (including all sub-structures).
   */
 -static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 +static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
  {
        int i;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
  
        efx->net_dev = net_dev;
 -      efx->rx_checksum_enabled = true;
        spin_lock_init(&efx->stats_lock);
        mutex_init(&efx->mac_lock);
        efx->mac_op = type->default_mac_ops;
@@@ -2451,7 -2442,7 +2453,7 @@@ static int efx_pci_probe_main(struct ef
  static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
  {
 -      struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
 +      const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int i, rc;
                return -ENOMEM;
        net_dev->features |= (type->offload_features | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO |
 -                            NETIF_F_GRO);
 +                            NETIF_F_RXCSUM);
        if (type->offload_features & NETIF_F_V6_CSUM)
                net_dev->features |= NETIF_F_TSO6;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
 -                                 NETIF_F_HIGHDMA | NETIF_F_TSO);
 +                                 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
 +                                 NETIF_F_RXCSUM);
 +      /* All offloads can be toggled */
 +      net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
        efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
index ab4d05b84eb76cd956f8692cc83356b70e284dc3,191a311da2dc77b4bfa65bcb927148cf791a54a0..5718260298c45188f9e6aa6a74e916621bc63a74
@@@ -330,7 -330,6 +330,6 @@@ enum efx_rx_alloc_method 
   * @eventq_mask: Event queue pointer mask
   * @eventq_read_ptr: Event queue read pointer
   * @last_eventq_read_ptr: Last event queue read pointer value.
-  * @magic_count: Event queue test event count
   * @irq_count: Number of IRQs since last adaptive moderation decision
   * @irq_mod_score: IRQ moderation score
   * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@@ -360,7 -359,6 +359,6 @@@ struct efx_channel 
        unsigned int eventq_mask;
        unsigned int eventq_read_ptr;
        unsigned int last_eventq_read_ptr;
-       unsigned int magic_count;
  
        unsigned int irq_count;
        unsigned int irq_mod_score;
@@@ -681,6 -679,7 +679,6 @@@ struct efx_filter_state
   * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
   * @port_initialized: Port initialized?
   * @net_dev: Operating system network device. Consider holding the rtnl lock
 - * @rx_checksum_enabled: RX checksumming enabled
   * @stats_buffer: DMA buffer for statistics
   * @mac_op: MAC interface
   * @phy_type: PHY type
@@@ -770,13 -769,14 +768,13 @@@ struct efx_nic 
  
        bool port_initialized;
        struct net_device *net_dev;
 -      bool rx_checksum_enabled;
  
        struct efx_buffer stats_buffer;
  
 -      struct efx_mac_operations *mac_op;
 +      const struct efx_mac_operations *mac_op;
  
        unsigned int phy_type;
 -      struct efx_phy_operations *phy_op;
 +      const struct efx_phy_operations *phy_op;
        void *phy_data;
        struct mdio_if_info mdio;
        unsigned int mdio_bus;
@@@ -897,7 -897,7 +895,7 @@@ struct efx_nic_type 
        void (*resume_wol)(struct efx_nic *efx);
        int (*test_registers)(struct efx_nic *efx);
        int (*test_nvram)(struct efx_nic *efx);
 -      struct efx_mac_operations *default_mac_ops;
 +      const struct efx_mac_operations *default_mac_ops;
  
        int revision;
        unsigned int mem_map_size;
diff --combined drivers/net/sfc/nic.c
index 2594f39c3ba4929379dcec205f8cf11491c9b8a2,10f1cb79c1472cfb91f9a50f16f827248bd00c96..5ac9fa2cd3bc2a740cbe4d920c3944e4dfc4a913
@@@ -84,7 -84,8 +84,8 @@@ static inline void efx_write_buf_tbl(st
  static inline efx_qword_t *efx_event(struct efx_channel *channel,
                                     unsigned int index)
  {
-       return ((efx_qword_t *) (channel->eventq.addr)) + index;
+       return ((efx_qword_t *) (channel->eventq.addr)) +
+               (index & channel->eventq_mask);
  }
  
  /* See if an event is present
@@@ -673,7 -674,8 +674,8 @@@ void efx_nic_eventq_read_ack(struct efx
        efx_dword_t reg;
        struct efx_nic *efx = channel->efx;
  
-       EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+       EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+                            channel->eventq_read_ptr & channel->eventq_mask);
        efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
                         channel->channel);
  }
@@@ -850,6 -852,7 +852,6 @@@ efx_handle_rx_event(struct efx_channel 
        unsigned expected_ptr;
        bool rx_ev_pkt_ok, discard = false, checksummed;
        struct efx_rx_queue *rx_queue;
 -      struct efx_nic *efx = channel->efx;
  
        /* Basic packet information */
        rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
                 * UDP/IP, then we can rely on the hardware checksum.
                 */
                checksummed =
 -                      likely(efx->rx_checksum_enabled) &&
 -                      (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
 -                       rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
 +                      rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
 +                      rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
        } else {
                efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
                checksummed = false;
@@@ -906,7 -910,7 +908,7 @@@ efx_handle_generated_event(struct efx_c
  
        code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
        if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-               ++channel->magic_count;
+               ; /* ignore */
        else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
                /* The queue must be empty, so we won't receive any rx
                 * events, so efx_process_channel() won't refill the
@@@ -1013,8 -1017,7 +1015,7 @@@ int efx_nic_process_eventq(struct efx_c
                /* Clear this event by marking it all ones */
                EFX_SET_QWORD(*p_event);
  
-               /* Increment read pointer */
-               read_ptr = (read_ptr + 1) & channel->eventq_mask;
+               ++read_ptr;
  
                ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
  
        return spent;
  }
  
+ /* Check whether an event is present in the eventq at the current
+  * read pointer.  Only useful for self-test.
+  */
+ bool efx_nic_event_present(struct efx_channel *channel)
+ {
+       return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+ }
  
  /* Allocate buffer table entries for event queue */
  int efx_nic_probe_eventq(struct efx_channel *channel)
@@@ -1163,7 -1173,7 +1171,7 @@@ static void efx_poll_flush_events(struc
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        unsigned int read_ptr = channel->eventq_read_ptr;
-       unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+       unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
  
        do {
                efx_qword_t *event = efx_event(channel, read_ptr);
                 * it's ok to throw away every non-flush event */
                EFX_SET_QWORD(*event);
  
-               read_ptr = (read_ptr + 1) & channel->eventq_mask;
+               ++read_ptr;
        } while (read_ptr != end_ptr);
  
        channel->eventq_read_ptr = read_ptr;
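
The eventq hunks above switch the read pointer to a free-running counter: it is only incremented, and masking with eventq_mask happens at the point of use (efx_event() and the EVQ_RPTR write). For a power-of-two queue this is equivalent to wrapping on every increment, which is what the removed '(read_ptr + 1) & eventq_mask' did. A small self-contained sketch of the equivalence (illustrative, not sfc code):

	#include <assert.h>

	/* Free-running index into a power-of-two ring: increment without
	 * wrapping, mask only when converting to an array slot. */
	static unsigned int slot(unsigned int index, unsigned int mask)
	{
		return index & mask;		/* mask == size - 1, size == 2^n */
	}

	int main(void)
	{
		unsigned int mask = 8 - 1;	/* ring of 8 entries */
		unsigned int free_running = 0, wrapped = 0;
		int i;

		for (i = 0; i < 100; i++) {
			assert(slot(free_running, mask) == wrapped);
			++free_running;				/* new style */
			wrapped = (wrapped + 1) & mask;		/* old style */
		}
		return 0;
	}
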
diff --combined drivers/net/sfc/nic.h
index f7ec03cc002d5922d2c8fd76dad0c422e81cc8fa,a42db6e35be39166d86f72887def5619c581dc58..7443f99c977f0cf0757f1296560aa611e0ade47f
@@@ -150,9 -150,9 +150,9 @@@ struct siena_nic_data 
        int wol_filter_id;
  };
  
 -extern struct efx_nic_type falcon_a1_nic_type;
 -extern struct efx_nic_type falcon_b0_nic_type;
 -extern struct efx_nic_type siena_a0_nic_type;
 +extern const struct efx_nic_type falcon_a1_nic_type;
 +extern const struct efx_nic_type falcon_b0_nic_type;
 +extern const struct efx_nic_type siena_a0_nic_type;
  
  /**************************************************************************
   *
@@@ -184,6 -184,7 +184,7 @@@ extern void efx_nic_fini_eventq(struct 
  extern void efx_nic_remove_eventq(struct efx_channel *channel);
  extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
  extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+ extern bool efx_nic_event_present(struct efx_channel *channel);
  
  /* MAC/PHY */
  extern void falcon_drain_tx_fifo(struct efx_nic *efx);
index 1b5bd13b0a6c12ced3bda55c186fdf29475dbda2,c95bc5cc1a1fbb4bad3d2e816477c5cfbe96fb97..c8a2d0dae7964de2a50a57b8b076ad54c131ba62
@@@ -130,20 -130,6 +130,20 @@@ bool ath9k_hw_wait(struct ath_hw *ah, u
  }
  EXPORT_SYMBOL(ath9k_hw_wait);
  
 +void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
 +                        int column, unsigned int *writecnt)
 +{
 +      int r;
 +
 +      ENABLE_REGWRITE_BUFFER(ah);
 +      for (r = 0; r < array->ia_rows; r++) {
 +              REG_WRITE(ah, INI_RA(array, r, 0),
 +                        INI_RA(array, r, column));
 +              DO_DELAY(*writecnt);
 +      }
 +      REGWRITE_BUFFER_FLUSH(ah);
 +}
 +
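
ath9k_hw_write_array() batches one column of an initvals table into a single buffered register-write sequence; DO_DELAY() is assumed to count the writes and insert the periodic settling delay. A hedged usage sketch, with the table name standing in for the ar5416IniArray members kept in struct ath_hw:

	/* Sketch: load one column of an initvals table during chip init. */
	static void example_load_initvals(struct ath_hw *ah, int column)
	{
		unsigned int reg_writes = 0;

		ath9k_hw_write_array(ah, &ah->iniCommon, column, &reg_writes);
	}
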
  u32 ath9k_hw_reverse_bits(u32 val, u32 n)
  {
        u32 retval;
        return retval;
  }
  
 -bool ath9k_get_channel_edges(struct ath_hw *ah,
 -                           u16 flags, u16 *low,
 -                           u16 *high)
 -{
 -      struct ath9k_hw_capabilities *pCap = &ah->caps;
 -
 -      if (flags & CHANNEL_5GHZ) {
 -              *low = pCap->low_5ghz_chan;
 -              *high = pCap->high_5ghz_chan;
 -              return true;
 -      }
 -      if ((flags & CHANNEL_2GHZ)) {
 -              *low = pCap->low_2ghz_chan;
 -              *high = pCap->high_2ghz_chan;
 -              return true;
 -      }
 -      return false;
 -}
 -
  u16 ath9k_hw_computetxtime(struct ath_hw *ah,
                           u8 phy, int kbps,
                           u32 frameLen, u16 rateix,
@@@ -359,6 -364,11 +359,6 @@@ static void ath9k_hw_init_config(struc
                ah->config.spurchans[i][1] = AR_NO_SPUR;
        }
  
 -      if (ah->hw_version.devid != AR2427_DEVID_PCIE)
 -              ah->config.ht_enable = 1;
 -      else
 -              ah->config.ht_enable = 0;
 -
        /* PAPRD needs some more work to be enabled */
        ah->config.paprd_disable = 1;
  
@@@ -400,8 -410,6 +400,8 @@@ static void ath9k_hw_init_defaults(stru
        ah->sta_id1_defaults =
                AR_STA_ID1_CRPT_MIC_ENABLE |
                AR_STA_ID1_MCAST_KSRCH;
 +      if (AR_SREV_9100(ah))
 +              ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
        ah->enable_32kHz_clock = DONT_USE_32KHZ;
        ah->slottime = 20;
        ah->globaltxtimeout = (u32) -1;
@@@ -665,14 -673,14 +665,14 @@@ static void ath9k_hw_init_qos(struct at
  
  unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
  {
 -              REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
 -              udelay(100);
 -              REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
 +      REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 +      udelay(100);
 +      REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
  
 -              while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
 -                      udelay(100);
 +      while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
 +              udelay(100);
  
 -              return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
 +      return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
  }
  EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
  
@@@ -822,7 -830,8 +822,7 @@@ void ath9k_hw_init_global_settings(stru
                ah->misc_mode);
  
        if (ah->misc_mode != 0)
 -              REG_WRITE(ah, AR_PCU_MISC,
 -                        REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
 +              REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
  
        if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
                sifstime = 16;
@@@ -890,19 -899,23 +890,19 @@@ u32 ath9k_regd_get_ctl(struct ath_regul
  static inline void ath9k_hw_set_dma(struct ath_hw *ah)
  {
        struct ath_common *common = ath9k_hw_common(ah);
 -      u32 regval;
  
        ENABLE_REGWRITE_BUFFER(ah);
  
        /*
         * set AHB_MODE not to do cacheline prefetches
        */
 -      if (!AR_SREV_9300_20_OR_LATER(ah)) {
 -              regval = REG_READ(ah, AR_AHB_MODE);
 -              REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
 -      }
 +      if (!AR_SREV_9300_20_OR_LATER(ah))
 +              REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);
  
        /*
         * let mac dma reads be in 128 byte chunks
         */
 -      regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
 -      REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
 +      REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);
  
        REGWRITE_BUFFER_FLUSH(ah);
  
        /*
         * let mac dma writes be in 128 byte chunks
         */
 -      regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
 -      REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
 +      REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);
  
        /*
         * Setup receive FIFO threshold to hold off TX activities
  
  static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
  {
 -      u32 val;
 +      u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
 +      u32 set = AR_STA_ID1_KSRCH_MODE;
  
 -      val = REG_READ(ah, AR_STA_ID1);
 -      val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
        switch (opmode) {
 -      case NL80211_IFTYPE_AP:
 -              REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
 -                        | AR_STA_ID1_KSRCH_MODE);
 -              REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
 -              break;
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_MESH_POINT:
 -              REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
 -                        | AR_STA_ID1_KSRCH_MODE);
 +              set |= AR_STA_ID1_ADHOC;
                REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
                break;
 +      case NL80211_IFTYPE_AP:
 +              set |= AR_STA_ID1_STA_AP;
 +              /* fall through */
        case NL80211_IFTYPE_STATION:
 -              REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
 +              REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
                break;
        default:
 -              if (ah->is_monitoring)
 -                      REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
 +              if (!ah->is_monitoring)
 +                      set = 0;
                break;
        }
 +      REG_RMW(ah, AR_STA_ID1, set, mask);
  }
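
Most of the ath9k hunks in this file fold open-coded REG_READ/modify/REG_WRITE sequences into REG_SET_BIT, REG_CLR_BIT and REG_RMW. The semantics assumed throughout are plain read-modify-write; a free-standing sketch of that, not the actual ath9k macro definitions:

	/* Clear the bits in 'clr', then set the bits in 'set', as one
	 * read-modify-write of a register value. */
	static inline unsigned int rmw_sketch(unsigned int regval,
					      unsigned int set, unsigned int clr)
	{
		return (regval & ~clr) | set;
	}

	/* REG_SET_BIT(ah, reg, bits) behaves like rmw_sketch(val, bits, 0),
	 * REG_CLR_BIT(ah, reg, bits) like rmw_sketch(val, 0, bits), and
	 * REG_RMW(ah, reg, set, clr) like rmw_sketch(val, set, clr). */
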
  
  void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@@ -1004,8 -1021,10 +1004,8 @@@ static bool ath9k_hw_set_reset(struct a
        u32 tmpReg;
  
        if (AR_SREV_9100(ah)) {
 -              u32 val = REG_READ(ah, AR_RTC_DERIVED_CLK);
 -              val &= ~AR_RTC_DERIVED_CLK_PERIOD;
 -              val |= SM(1, AR_RTC_DERIVED_CLK_PERIOD);
 -              REG_WRITE(ah, AR_RTC_DERIVED_CLK, val);
 +              REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
 +                            AR_RTC_DERIVED_CLK_PERIOD, 1);
                (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
        }
  
@@@ -1193,20 -1212,6 +1193,20 @@@ static bool ath9k_hw_channel_change(str
        return true;
  }
  
 +static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
 +{
 +      u32 gpio_mask = ah->gpio_mask;
 +      int i;
 +
 +      for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
 +              if (!(gpio_mask & 1))
 +                      continue;
 +
 +              ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 +              ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
 +      }
 +}
 +
  bool ath9k_hw_check_alive(struct ath_hw *ah)
  {
        int count = 50;
@@@ -1249,15 -1254,6 +1249,6 @@@ int ath9k_hw_reset(struct ath_hw *ah, s
        ah->txchainmask = common->tx_chainmask;
        ah->rxchainmask = common->rx_chainmask;
  
-       if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
-               ath9k_hw_abortpcurecv(ah);
-               if (!ath9k_hw_stopdmarecv(ah)) {
-                       ath_dbg(common, ATH_DBG_XMIT,
-                               "Failed to stop receive dma\n");
-                       bChannelChange = false;
-               }
-       }
        if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
                return -EIO;
  
        REGWRITE_BUFFER_FLUSH(ah);
  
        ah->intr_txqs = 0;
 -      for (i = 0; i < ah->caps.total_queues; i++)
 +      for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                ath9k_hw_resettxqueue(ah, i);
  
        ath9k_hw_init_interrupt_masks(ah, ah->opmode);
                ar9002_hw_enable_wep_aggregation(ah);
        }
  
 -      REG_WRITE(ah, AR_STA_ID1,
 -                REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
 +      REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
  
        ath9k_hw_set_dma(ah);
  
        if (AR_SREV_9300_20_OR_LATER(ah))
                ar9003_hw_bb_watchdog_config(ah);
  
 +      ath9k_hw_apply_gpio_override(ah);
 +
        return 0;
  }
  EXPORT_SYMBOL(ath9k_hw_reset);
@@@ -1675,15 -1670,21 +1666,15 @@@ void ath9k_hw_beaconinit(struct ath_hw 
        case NL80211_IFTYPE_MESH_POINT:
                REG_SET_BIT(ah, AR_TXCFG,
                            AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
 -              REG_WRITE(ah, AR_NEXT_NDP_TIMER,
 -                        TU_TO_USEC(next_beacon +
 -                                   (ah->atim_window ? ah->
 -                                    atim_window : 1)));
 +              REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
 +                        TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
                flags |= AR_NDP_TIMER_EN;
        case NL80211_IFTYPE_AP:
 -              REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
 -              REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
 -                        TU_TO_USEC(next_beacon -
 -                                   ah->config.
 -                                   dma_beacon_response_time));
 -              REG_WRITE(ah, AR_NEXT_SWBA,
 -                        TU_TO_USEC(next_beacon -
 -                                   ah->config.
 -                                   sw_beacon_response_time));
 +              REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
 +              REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
 +                        TU_TO_USEC(ah->config.dma_beacon_response_time));
 +              REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
 +                        TU_TO_USEC(ah->config.sw_beacon_response_time));
                flags |=
                        AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
                break;
                break;
        }
  
 -      REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
 -      REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
 -      REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
 -      REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
 +      REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
 +      REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
 +      REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
 +      REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
  
        REGWRITE_BUFFER_FLUSH(ah);
  
 -      beacon_period &= ~ATH9K_BEACON_ENA;
 -      if (beacon_period & ATH9K_BEACON_RESET_TSF) {
 -              ath9k_hw_reset_tsf(ah);
 -      }
 -
        REG_SET_BIT(ah, AR_TIMER_MODE, flags);
  }
  EXPORT_SYMBOL(ath9k_hw_beaconinit);
@@@ -1836,8 -1842,6 +1827,8 @@@ int ath9k_hw_fill_cap_info(struct ath_h
            !(AR_SREV_9271(ah)))
                /* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
                pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
 +      else if (AR_SREV_9100(ah))
 +              pCap->rx_chainmask = 0x7;
        else
                /* Use rx_chainmask from EEPROM. */
                pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
        if (AR_SREV_9300_20_OR_LATER(ah))
                ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
  
 -      pCap->low_2ghz_chan = 2312;
 -      pCap->high_2ghz_chan = 2732;
 -
 -      pCap->low_5ghz_chan = 4920;
 -      pCap->high_5ghz_chan = 6100;
 -
        common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
  
 -      if (ah->config.ht_enable)
 +      if (ah->hw_version.devid != AR2427_DEVID_PCIE)
                pCap->hw_caps |= ATH9K_HW_CAP_HT;
        else
                pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
  
 -      if (capField & AR_EEPROM_EEPCAP_MAXQCU)
 -              pCap->total_queues =
 -                      MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
 -      else
 -              pCap->total_queues = ATH9K_NUM_TX_QUEUES;
 -
 -      if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
 -              pCap->keycache_size =
 -                      1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
 -      else
 -              pCap->keycache_size = AR_KEYTABLE_SIZE;
 -
 -      if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
 -              pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
 -      else
 -              pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
 -
        if (AR_SREV_9271(ah))
                pCap->num_gpio_pins = AR9271_NUM_GPIO;
        else if (AR_DEVID_7010(ah))
                pCap->rts_aggr_limit = (8 * 1024);
        }
  
 -      pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
 -
  #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
        ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
        if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
        else
                pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
  
 -      if (regulatory->current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) {
 -              pCap->reg_cap =
 -                      AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
 -                      AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
 -                      AR_EEPROM_EEREGCAP_EN_KK_U2 |
 -                      AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
 -      } else {
 -              pCap->reg_cap =
 -                      AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
 -                      AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
 -      }
 -
 -      /* Advertise midband for AR5416 with FCC midband set in eeprom */
 -      if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
 -          AR_SREV_5416(ah))
 -              pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
 -
        if (AR_SREV_9280_20_OR_LATER(ah) && common->btcoex_enabled) {
                btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
                btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
@@@ -2140,9 -2186,11 +2131,9 @@@ void ath9k_hw_setrxfilter(struct ath_h
        REG_WRITE(ah, AR_PHY_ERR, phybits);
  
        if (phybits)
 -              REG_WRITE(ah, AR_RXCFG,
 -                        REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
 +              REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
        else
 -              REG_WRITE(ah, AR_RXCFG,
 -                        REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
 +              REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
  
        REGWRITE_BUFFER_FLUSH(ah);
  }
@@@ -2318,11 -2366,10 +2309,11 @@@ static u32 rightmost_index(struct ath_g
        return timer_table->gen_timer_index[b];
  }
  
 -static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
 +u32 ath9k_hw_gettsf32(struct ath_hw *ah)
  {
        return REG_READ(ah, AR_TSF_L32);
  }
 +EXPORT_SYMBOL(ath9k_hw_gettsf32);
  
  struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
                                          void (*trigger)(void *),
index 6f431cbff38a3761df2329ffe4a92b9b8b598b3c,edc1cbbfecaf6fbd153ce00237f1101f49096edc..1968c67e3fababaa1eb357f482acfc9d46309530
@@@ -209,8 -209,15 +209,8 @@@ bool ath9k_hw_set_txq_props(struct ath_
  {
        u32 cw;
        struct ath_common *common = ath9k_hw_common(ah);
 -      struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;
  
 -      if (q >= pCap->total_queues) {
 -              ath_dbg(common, ATH_DBG_QUEUE,
 -                      "Set TXQ properties, invalid queue: %u\n", q);
 -              return false;
 -      }
 -
        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
@@@ -273,8 -280,15 +273,8 @@@ bool ath9k_hw_get_txq_props(struct ath_
                            struct ath9k_tx_queue_info *qinfo)
  {
        struct ath_common *common = ath9k_hw_common(ah);
 -      struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath9k_tx_queue_info *qi;
  
 -      if (q >= pCap->total_queues) {
 -              ath_dbg(common, ATH_DBG_QUEUE,
 -                      "Get TXQ properties, invalid queue: %u\n", q);
 -              return false;
 -      }
 -
        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
@@@ -306,27 -320,28 +306,27 @@@ int ath9k_hw_setuptxqueue(struct ath_h
  {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;
 -      struct ath9k_hw_capabilities *pCap = &ah->caps;
        int q;
  
        switch (type) {
        case ATH9K_TX_QUEUE_BEACON:
 -              q = pCap->total_queues - 1;
 +              q = ATH9K_NUM_TX_QUEUES - 1;
                break;
        case ATH9K_TX_QUEUE_CAB:
 -              q = pCap->total_queues - 2;
 +              q = ATH9K_NUM_TX_QUEUES - 2;
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
                q = 1;
                break;
        case ATH9K_TX_QUEUE_UAPSD:
 -              q = pCap->total_queues - 3;
 +              q = ATH9K_NUM_TX_QUEUES - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
 -              for (q = 0; q < pCap->total_queues; q++)
 +              for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
                        if (ah->txq[q].tqi_type ==
                            ATH9K_TX_QUEUE_INACTIVE)
                                break;
 -              if (q == pCap->total_queues) {
 +              if (q == ATH9K_NUM_TX_QUEUES) {
                        ath_err(common, "No available TX queue\n");
                        return -1;
                }
@@@ -367,9 -382,15 +367,9 @@@ EXPORT_SYMBOL(ath9k_hw_setuptxqueue)
  
  bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
  {
 -      struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_tx_queue_info *qi;
  
 -      if (q >= pCap->total_queues) {
 -              ath_dbg(common, ATH_DBG_QUEUE,
 -                      "Release TXQ, invalid queue: %u\n", q);
 -              return false;
 -      }
        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
@@@ -393,11 -414,18 +393,11 @@@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue)
  
  bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
  {
 -      struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
        struct ath9k_tx_queue_info *qi;
        u32 cwMin, chanCwMin, value;
  
 -      if (q >= pCap->total_queues) {
 -              ath_dbg(common, ATH_DBG_QUEUE,
 -                      "Reset TXQ, invalid queue: %u\n", q);
 -              return false;
 -      }
 -
        qi = &ah->txq[q];
        if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
                ath_dbg(common, ATH_DBG_QUEUE,
                REG_WRITE(ah, AR_QCBRCFG(q),
                          SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
                          SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
 -              REG_WRITE(ah, AR_QMISC(q),
 -                        REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
 -                        (qi->tqi_cbrOverflowLimit ?
 -                         AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
 +              REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
 +                          (qi->tqi_cbrOverflowLimit ?
 +                           AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
        }
        if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
  
        if (qi->tqi_burstTime
 -          && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
 -              REG_WRITE(ah, AR_QMISC(q),
 -                        REG_READ(ah, AR_QMISC(q)) |
 -                        AR_Q_MISC_RDYTIME_EXP_POLICY);
 +          && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
 +              REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);
  
 -      }
 -
 -      if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
 -              REG_WRITE(ah, AR_DMISC(q),
 -                        REG_READ(ah, AR_DMISC(q)) |
 -                        AR_D_MISC_POST_FR_BKOFF_DIS);
 -      }
 +      if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
 +              REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
  
        REGWRITE_BUFFER_FLUSH(ah);
  
 -      if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
 -              REG_WRITE(ah, AR_DMISC(q),
 -                        REG_READ(ah, AR_DMISC(q)) |
 -                        AR_D_MISC_FRAG_BKOFF_EN);
 -      }
 +      if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
 +              REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);
 +
        switch (qi->tqi_type) {
        case ATH9K_TX_QUEUE_BEACON:
                ENABLE_REGWRITE_BUFFER(ah);
  
 -              REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
 -                        | AR_Q_MISC_FSP_DBA_GATED
 -                        | AR_Q_MISC_BEACON_USE
 -                        | AR_Q_MISC_CBR_INCR_DIS1);
 +              REG_SET_BIT(ah, AR_QMISC(q),
 +                          AR_Q_MISC_FSP_DBA_GATED
 +                          | AR_Q_MISC_BEACON_USE
 +                          | AR_Q_MISC_CBR_INCR_DIS1);
  
 -              REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
 -                        | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
 +              REG_SET_BIT(ah, AR_DMISC(q),
 +                          (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
 -                        | AR_D_MISC_BEACON_USE
 -                        | AR_D_MISC_POST_FR_BKOFF_DIS);
 +                          | AR_D_MISC_BEACON_USE
 +                          | AR_D_MISC_POST_FR_BKOFF_DIS);
  
                REGWRITE_BUFFER_FLUSH(ah);
  
        case ATH9K_TX_QUEUE_CAB:
                ENABLE_REGWRITE_BUFFER(ah);
  
 -              REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
 -                        | AR_Q_MISC_FSP_DBA_GATED
 -                        | AR_Q_MISC_CBR_INCR_DIS1
 -                        | AR_Q_MISC_CBR_INCR_DIS0);
 +              REG_SET_BIT(ah, AR_QMISC(q),
 +                          AR_Q_MISC_FSP_DBA_GATED
 +                          | AR_Q_MISC_CBR_INCR_DIS1
 +                          | AR_Q_MISC_CBR_INCR_DIS0);
                value = (qi->tqi_readyTime -
                         (ah->config.sw_beacon_response_time -
                          ah->config.dma_beacon_response_time) -
                         ah->config.additional_swba_backoff) * 1024;
                REG_WRITE(ah, AR_QRDYTIMECFG(q),
                          value | AR_Q_RDYTIMECFG_EN);
 -              REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
 -                        | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
 +              REG_SET_BIT(ah, AR_DMISC(q),
 +                          (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                             AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
  
                REGWRITE_BUFFER_FLUSH(ah);
  
                break;
        case ATH9K_TX_QUEUE_PSPOLL:
 -              REG_WRITE(ah, AR_QMISC(q),
 -                        REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
 +              REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
                break;
        case ATH9K_TX_QUEUE_UAPSD:
 -              REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
 -                        AR_D_MISC_POST_FR_BKOFF_DIS);
 +              REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
                break;
        default:
                break;
        }
  
        if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
 -              REG_WRITE(ah, AR_DMISC(q),
 -                        REG_READ(ah, AR_DMISC(q)) |
 -                        SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
 -                           AR_D_MISC_ARB_LOCKOUT_CNTRL) |
 -                        AR_D_MISC_POST_FR_BKOFF_DIS);
 +              REG_SET_BIT(ah, AR_DMISC(q),
 +                          SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
 +                             AR_D_MISC_ARB_LOCKOUT_CNTRL) |
 +                          AR_D_MISC_POST_FR_BKOFF_DIS);
        }
  
        if (AR_SREV_9300_20_OR_LATER(ah))
@@@ -710,32 -751,53 +710,51 @@@ void ath9k_hw_abortpcurecv(struct ath_h
  }
  EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
  
- bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
+ bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
  {
  #define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
 -#define AH_RX_TIME_QUANTUM     100     /* usec */
        struct ath_common *common = ath9k_hw_common(ah);
+       u32 mac_status, last_mac_status = 0;
        int i;
  
+       /* Enable access to the DMA observation bus */
+       REG_WRITE(ah, AR_MACMISC,
+                 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+                  (AR_MACMISC_MISC_OBS_BUS_1 <<
+                   AR_MACMISC_MISC_OBS_BUS_MSB_S)));
        REG_WRITE(ah, AR_CR, AR_CR_RXD);
  
        /* Wait for rx enable bit to go low */
        for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
                if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
                        break;
+               if (!AR_SREV_9300_20_OR_LATER(ah)) {
+                       mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+                       if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+                               *reset = true;
+                               break;
+                       }
+                       last_mac_status = mac_status;
+               }
                udelay(AH_TIME_QUANTUM);
        }
  
        if (i == 0) {
                ath_err(common,
-                       "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+                       "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
                        AH_RX_STOP_DMA_TIMEOUT / 1000,
                        REG_READ(ah, AR_CR),
-                       REG_READ(ah, AR_DIAG_SW));
+                       REG_READ(ah, AR_DIAG_SW),
+                       REG_READ(ah, AR_DMADBG_7));
                return false;
        } else {
                return true;
        }
  
 -#undef AH_RX_TIME_QUANTUM
  #undef AH_RX_STOP_DMA_TIMEOUT
  }
  EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
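Note: the stopdmarecv change above polls AR_CR_RXE with a fixed time budget and, on pre-AR9300 chips, additionally watches AR_DMADBG_7 for a repeated 0x1c0 status so the caller can recover with a full reset instead of only logging a timeout. A minimal, self-contained C sketch of that poll-and-detect-stuck pattern follows; the accessors, constants and names here are illustrative stand-ins, not the driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POLL_TIMEOUT_US 10000   /* overall budget, like AH_RX_STOP_DMA_TIMEOUT */
#define POLL_QUANTUM_US 100     /* per-poll delay, like AH_TIME_QUANTUM */
#define STUCK_STATE     0x1c0   /* status value treated as "engine wedged" */

/* Simulated hardware: pretend the engine is wedged in the stuck state. */
static uint32_t read_status(void) { return STUCK_STATE; }
static void delay_us(unsigned int us) { (void)us; }

/*
 * Poll until the engine goes idle (status 0) or the budget runs out.
 * Seeing the same stuck status on two consecutive polls sets *reset so
 * the caller can recover with a full reset instead of failing hard.
 */
static bool wait_for_idle(bool *reset)
{
	uint32_t status, last_status = 0;

	for (int i = POLL_TIMEOUT_US / POLL_QUANTUM_US; i != 0; i--) {
		status = read_status() & 0x7f0;
		if (status == 0)
			return true;
		if (status == STUCK_STATE && status == last_status) {
			*reset = true;
			return true;
		}
		last_status = status;
		delay_us(POLL_QUANTUM_US);
	}
	return false;
}

int main(void)
{
	bool reset = false;
	printf("stopped=%d reset=%d\n", wait_for_idle(&reset), reset);
	return 0;
}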
diff --combined drivers/net/wireless/ath/ath9k/recv.c
index 3842b751866122f4fb517d42b43c6e22175cfab9,dcd19bc337d1ddf96890432f085ee6ee83c9658d..cfaf0a48b93998464719cfe0c5e1ef19bf700dea
@@@ -486,12 -486,12 +486,12 @@@ start_recv
  bool ath_stoprecv(struct ath_softc *sc)
  {
        struct ath_hw *ah = sc->sc_ah;
-       bool stopped;
+       bool stopped, reset = false;
  
        spin_lock_bh(&sc->rx.rxbuflock);
        ath9k_hw_abortpcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
-       stopped = ath9k_hw_stopdmarecv(ah);
+       stopped = ath9k_hw_stopdmarecv(ah, &reset);
  
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
                        "confusing the DMA engine when we start RX up\n");
                ATH_DBG_WARN_ON_ONCE(!stopped);
        }
-       return stopped;
+       return stopped || reset;
  }
  
  void ath_flushrecv(struct ath_softc *sc)
@@@ -574,7 -574,7 +574,7 @@@ static void ath_rx_ps_beacon(struct ath
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_dbg(common, ATH_DBG_PS,
                        "Reconfigure Beacon timers based on timestamp from the AP\n");
 -              ath_beacon_config(sc, NULL);
 +              ath_set_beacon(sc);
        }
  
        if (ath_beacon_dtim_pending_cab(skb)) {
diff --combined net/core/dev.c
index f523eee3141cc682f563d41bb8aa575916257bba,c2ac599fa0f642f368025ababaa6c95476874c24..3871bf69a38680a290dfa76636e74bb214f6f13a
@@@ -3130,12 -3130,6 +3130,12 @@@ another_round
  
        __this_cpu_inc(softnet_data.processed);
  
 +      if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
 +              skb = vlan_untag(skb);
 +              if (unlikely(!skb))
 +                      goto out;
 +      }
 +
  #ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@@ -3183,7 -3177,7 +3183,7 @@@ ncls
                        ret = deliver_skb(skb, pt_prev, orig_dev);
                        pt_prev = NULL;
                }
 -              if (vlan_hwaccel_do_receive(&skb)) {
 +              if (vlan_do_receive(&skb)) {
                        ret = __netif_receive_skb(skb);
                        goto out;
                } else if (unlikely(!skb))
@@@ -5209,11 -5203,15 +5209,15 @@@ u32 netdev_fix_features(struct net_devi
        }
  
        /* TSO requires that SG is present as well. */
-       if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
-               netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
-               features &= ~NETIF_F_TSO;
+       if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+               netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+               features &= ~NETIF_F_ALL_TSO;
        }
  
+       /* TSO ECN requires that TSO is present as well. */
+       if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+               features &= ~NETIF_F_TSO_ECN;
        /* Software GSO depends on SG. */
        if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
                netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
  }
  EXPORT_SYMBOL(netdev_fix_features);
  
 -void netdev_update_features(struct net_device *dev)
 +int __netdev_update_features(struct net_device *dev)
  {
        u32 features;
        int err = 0;
  
 +      ASSERT_RTNL();
 +
        features = netdev_get_wanted_features(dev);
  
        if (dev->netdev_ops->ndo_fix_features)
        features = netdev_fix_features(dev, features);
  
        if (dev->features == features)
 -              return;
 +              return 0;
  
        netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
                dev->features, features);
        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
  
 -      if (!err)
 -              dev->features = features;
 -      else if (err < 0)
 +      if (unlikely(err < 0)) {
                netdev_err(dev,
                        "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
                        err, features, dev->features);
 +              return -1;
 +      }
 +
 +      if (!err)
 +              dev->features = features;
 +
 +      return 1;
 +}
 +
 +void netdev_update_features(struct net_device *dev)
 +{
 +      if (__netdev_update_features(dev))
 +              netdev_features_change(dev);
  }
  EXPORT_SYMBOL(netdev_update_features);
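The dev.c hunk above splits feature recomputation from notification: __netdev_update_features() reports whether dev->features actually changed, and the netdev_update_features() wrapper only then calls netdev_features_change(). Below is a standalone sketch of that worker/wrapper split, using a made-up device struct and notify hook rather than the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical device with a wanted and an active feature mask. */
struct device {
	uint32_t wanted;
	uint32_t active;
};

static void features_changed_notify(struct device *dev)
{
	printf("features now 0x%08x\n", dev->active);
}

/* Worker: recompute state, return nonzero only if something changed. */
static int update_features(struct device *dev)
{
	uint32_t next = dev->wanted;   /* fixups/driver hooks would go here */

	if (next == dev->active)
		return 0;
	dev->active = next;
	return 1;
}

/* Wrapper: notify listeners only when the worker reports a change. */
static void update_features_and_notify(struct device *dev)
{
	if (update_features(dev))
		features_changed_notify(dev);
}

int main(void)
{
	struct device dev = { .wanted = 0x3, .active = 0x1 };

	update_features_and_notify(&dev);  /* prints: features now 0x00000003 */
	update_features_and_notify(&dev);  /* no change, no notification */
	return 0;
}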
  
@@@ -5433,14 -5418,6 +5437,14 @@@ int register_netdevice(struct net_devic
                dev->features &= ~NETIF_F_GSO;
        }
  
 +      /* Turn on no cache copy if HW is doing checksum */
 +      dev->hw_features |= NETIF_F_NOCACHE_COPY;
 +      if ((dev->features & NETIF_F_ALL_CSUM) &&
 +          !(dev->features & NETIF_F_NO_CSUM)) {
 +              dev->wanted_features |= NETIF_F_NOCACHE_COPY;
 +              dev->features |= NETIF_F_NOCACHE_COPY;
 +      }
 +
        /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
         * vlan_dev_init() will do the dev->features check, so these features
         * are enabled only if supported by underlying device.
                goto err_uninit;
        dev->reg_state = NETREG_REGISTERED;
  
 -      netdev_update_features(dev);
 +      __netdev_update_features(dev);
  
        /*
         *      Default initial state at registry is that the
@@@ -6198,10 -6175,6 +6202,10 @@@ u32 netdev_increment_features(u32 all, 
                }
        }
  
 +      /* If device can't no cache copy, don't do for all */
 +      if (!(one & NETIF_F_NOCACHE_COPY))
 +              all &= ~NETIF_F_NOCACHE_COPY;
 +
        one |= NETIF_F_ALL_CSUM;
  
        one |= all & NETIF_F_ONE_FOR_ALL;
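The last dev.c hunk touches netdev_increment_features(), which folds the feature masks of lower devices into an aggregate; the added lines clear NETIF_F_NOCACHE_COPY from the aggregate as soon as one device lacks it. A small standalone sketch of that rule, with made-up flag values in place of the real NETIF_F_* bits:

#include <stdint.h>
#include <stdio.h>

/* Made-up flag values; the real NETIF_F_* bits live in linux/netdevice.h. */
#define F_SG           0x01
#define F_NOCACHE_COPY 0x02

/*
 * Aggregate the capabilities of several lower devices (as a bond or bridge
 * would): a feature that every device must support survives only if it is
 * present in each per-device mask.
 */
static uint32_t fold_required_feature(uint32_t all, uint32_t one, uint32_t flag)
{
	if (!(one & flag))
		all &= ~flag;
	return all;
}

int main(void)
{
	uint32_t all = F_SG | F_NOCACHE_COPY;
	uint32_t devices[] = { F_SG | F_NOCACHE_COPY, F_SG /* no nocache copy */ };

	for (unsigned i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
		all = fold_required_feature(all, devices[i], F_NOCACHE_COPY);

	printf("aggregate = 0x%02x\n", all);  /* F_SG only: 0x01 */
	return 0;
}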
diff --combined net/ipv4/inet_connection_sock.c
index f784608a4c45205c977a2af45da7ae2334553430,38f23e721b80108d0a9ae3716fca5212cf8f737d..8514db54a7f4a5c274557e8c50b992c105648f45
@@@ -73,7 -73,7 +73,7 @@@ int inet_csk_bind_conflict(const struc
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if (!reuse || !sk2->sk_reuse ||
-                           ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+                           sk2->sk_state == TCP_LISTEN) {
                                const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
                                if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                                    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@@ -122,8 -122,7 +122,7 @@@ again
                                            (tb->num_owners < smallest_size || smallest_size == -1)) {
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
-                                               if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
-                                                   !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+                                               if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
                                                        spin_unlock(&head->lock);
                                                        snum = smallest_rover;
                                                        goto have_snum;
@@@ -356,14 -355,20 +355,14 @@@ struct dst_entry *inet_csk_route_req(st
        struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct ip_options *opt = inet_rsk(req)->opt;
 -      struct flowi4 fl4 = {
 -              .flowi4_oif = sk->sk_bound_dev_if,
 -              .flowi4_mark = sk->sk_mark,
 -              .daddr = ((opt && opt->srr) ?
 -                        opt->faddr : ireq->rmt_addr),
 -              .saddr = ireq->loc_addr,
 -              .flowi4_tos = RT_CONN_FLAGS(sk),
 -              .flowi4_proto = sk->sk_protocol,
 -              .flowi4_flags = inet_sk_flowi_flags(sk),
 -              .fl4_sport = inet_sk(sk)->inet_sport,
 -              .fl4_dport = ireq->rmt_port,
 -      };
        struct net *net = sock_net(sk);
 +      struct flowi4 fl4;
  
 +      flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
 +                         RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 +                         sk->sk_protocol, inet_sk_flowi_flags(sk),
 +                         (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
 +                         ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt))