Git Repo - linux.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author David S. Miller <[email protected]>
Wed, 17 Feb 2010 06:09:29 +0000 (22:09 -0800)
committer David S. Miller <[email protected]>
Wed, 17 Feb 2010 06:09:29 +0000 (22:09 -0800)
MAINTAINERS
arch/powerpc/kvm/Kconfig
drivers/net/benet/be_cmds.c
drivers/net/ixgbe/ixgbe_main.c
include/linux/pci.h
net/core/ethtool.c
net/ipv4/ipcomp.c
net/ipv6/ipcomp6.c
net/xfrm/xfrm_state.c

diff --combined MAINTAINERS
index fe17b8016bcf974c88f76e9dff6a5895177410bb,412eff60c33d87305bdb3c9ce4315f52682375fe..64a237bb01ab94d651d7aec6533e802302f171ae
@@@ -3411,8 -3411,10 +3411,10 @@@ S:    Maintained
  F:    drivers/scsi/sym53c8xx_2/
  
  LTP (Linux Test Project)
- M:    Subrata Modak <[email protected]>
- M:    Mike Frysinger <[email protected]>
+ M:    Rishikesh K Rajak <[email protected]>
+ M:    Garrett Cooper <[email protected]>
+ M:     Mike Frysinger <[email protected]>
+ M:     Subrata Modak <[email protected]>
  L:    [email protected] (subscribers-only)
  W:    http://ltp.sourceforge.net/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@@ -4444,13 -4446,6 +4446,13 @@@ S:    Supported
  F:    Documentation/networking/LICENSE.qla3xxx
  F:    drivers/net/qla3xxx.*
  
 +QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
 +M:    Amit Kumar Salecha <[email protected]>
 +M:    [email protected]
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/net/qlcnic/
 +
  QLOGIC QLGE 10Gb ETHERNET DRIVER
  M:    Ron Mercer <[email protected]>
  M:    [email protected]
@@@ -5802,15 -5797,6 +5804,15 @@@ S:    Maintained
  F:    Documentation/filesystems/vfat.txt
  F:    fs/fat/
  
 +VIRTIO HOST (VHOST)
 +M:    "Michael S. Tsirkin" <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/vhost/
 +F:    include/linux/vhost.h
 +
  VIA RHINE NETWORK DRIVER
  M:    Roger Luethi <[email protected]>
  S:    Maintained
diff --combined arch/powerpc/kvm/Kconfig
index e28841fbfb8dd90a51a93e54d1f6dbb289a3dce2,6fb6e8aa389039a4258d4a2935817cf1c8428e3e..fe037fdaf1b392639a33030efd6b8a2190dc1ad4
@@@ -53,7 -53,7 +53,7 @@@ config KVM_440
  
  config KVM_EXIT_TIMING
        bool "Detailed exit timing"
-       depends on KVM
+       depends on KVM_440 || KVM_E500
        ---help---
          Calculate elapsed time for every exit/enter cycle. A per-vcpu
          report is available in debugfs kvm/vm#_vcpu#_timing.
@@@ -75,7 -75,6 +75,7 @@@ config KVM_E500
  
          If unsure, say N.
  
 +source drivers/vhost/Kconfig
  source drivers/virtio/Kconfig
  
  endif # VIRTUALIZATION
diff --combined drivers/net/benet/be_cmds.c
index 3397ee327e1fe7fe4fdc7d068d64b3abc5de6608,006cb2efcd2250691df59c94350252b4eee4b926..477f82bc647ebf93ad709378c3d497edde12b190
@@@ -167,14 -167,7 +167,14 @@@ static int be_mbox_db_ready_wait(struc
        u32 ready;
  
        do {
 -              ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
 +              ready = ioread32(db);
 +              if (ready == 0xffffffff) {
 +                      dev_err(&adapter->pdev->dev,
 +                              "pci slot disconnected\n");
 +                      return -1;
 +              }
 +
 +              ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;
  
@@@ -205,11 -198,6 +205,11 @@@ static int be_mbox_notify_wait(struct b
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;
  
 +      /* wait for ready to be set */
 +      status = be_mbox_db_ready_wait(adapter, db);
 +      if (status != 0)
 +              return status;
 +
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@@ -308,6 -296,7 +308,7 @@@ static void be_cmd_hdr_prepare(struct b
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+       req_hdr->version = 0;
  }
  
  static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@@ -408,9 -397,6 +409,9 @@@ int be_cmd_fw_clean(struct be_adapter *
        u8 *wrb;
        int status;
  
 +      if (adapter->eeh_err)
 +              return -EIO;
 +
        spin_lock(&adapter->mbox_lock);
  
        wrb = (u8 *)wrb_from_mbox(adapter);
@@@ -783,9 -769,6 +784,9 @@@ int be_cmd_q_destroy(struct be_adapter 
        u8 subsys = 0, opcode = 0;
        int status;
  
 +      if (adapter->eeh_err)
 +              return -EIO;
 +
        spin_lock(&adapter->mbox_lock);
  
        wrb = wrb_from_mbox(adapter);
@@@ -874,9 -857,6 +875,9 @@@ int be_cmd_if_destroy(struct be_adapte
        struct be_cmd_req_if_destroy *req;
        int status;
  
 +      if (adapter->eeh_err)
 +              return -EIO;
 +
        spin_lock(&adapter->mbox_lock);
  
        wrb = wrb_from_mbox(adapter);
@@@ -1395,7 -1375,7 +1396,7 @@@ int be_cmd_write_flashrom(struct be_ada
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
  {
        struct be_mcc_wrb *wrb;
 -      struct be_cmd_write_flashrom *req = cmd->va;
 +      struct be_cmd_write_flashrom *req;
        struct be_sge *sge;
        int status;
  
@@@ -1429,8 -1409,7 +1430,8 @@@ err
        return status;
  }
  
 -int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
 +int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
 +                       int offset)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
  
 -      req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
 +      req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
 -      req->params.offset = 0x3FFFC;
 +      req->params.offset = offset;
        req->params.data_buf_size = 0x4;
  
        status = be_mcc_notify_wait(adapter);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
  }
 +
 +extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 +                              struct be_dma_mem *nonemb_cmd)
 +{
 +      struct be_mcc_wrb *wrb;
 +      struct be_cmd_req_seeprom_read *req;
 +      struct be_sge *sge;
 +      int status;
 +
 +      spin_lock_bh(&adapter->mcc_lock);
 +
 +      wrb = wrb_from_mccq(adapter);
 +      req = nonemb_cmd->va;
 +      sge = nonembedded_sgl(wrb);
 +
 +      be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
 +                      OPCODE_COMMON_SEEPROM_READ);
 +
 +      be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 +                      OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
 +
 +      sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
 +      sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
 +      sge->len = cpu_to_le32(nonemb_cmd->size);
 +
 +      status = be_mcc_notify_wait(adapter);
 +
 +      spin_unlock_bh(&adapter->mcc_lock);
 +      return status;
 +}
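
The mailbox changes above lean on a standard PCI convention: once a device has been surprise-removed (or isolated by EEH, as the new adapter->eeh_err checks anticipate), reads of its registers return all ones, so a raw 0xffffffff from ioread32() is treated as "slot disconnected" before the ready mask is applied. A minimal sketch of that check in isolation; the helper name is illustrative, not from the driver:

#include <linux/io.h>
#include <linux/errno.h>

/* Read a register, treating an all-ones value as a vanished device. */
static int be_read_reg_checked(void __iomem *reg, u32 *val)
{
	*val = ioread32(reg);
	if (*val == 0xffffffff)	/* bus returns all ones once the device is gone */
		return -EIO;
	return 0;
}
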
diff --combined drivers/net/ixgbe/ixgbe_main.c
index 0792f151de99f299a4f8a5eaac85c0c43dffd42c,951b73cf5ca2b2f9dd53e4bcd6c8509740a659b0..43a8de3dc4d68e8f0ca6766b40fd1e86ff10e9a2
  #include "ixgbe.h"
  #include "ixgbe_common.h"
  #include "ixgbe_dcb_82599.h"
 +#include "ixgbe_sriov.h"
  
  char ixgbe_driver_name[] = "ixgbe";
  static const char ixgbe_driver_string[] =
                                "Intel(R) 10 Gigabit PCI Express Network Driver";
  
 -#define DRV_VERSION "2.0.44-k2"
 +#define DRV_VERSION "2.0.62-k2"
  const char ixgbe_driver_version[] = DRV_VERSION;
  static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
  
@@@ -68,7 -67,7 +68,7 @@@ static const struct ixgbe_info *ixgbe_info_tbl[]
   * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
   *   Class, Class Mask, private data (not used) }
   */
 -static struct pci_device_id ixgbe_pci_tbl[] = {
 +static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@@ -125,13 -124,6 +125,13 @@@ static struct notifier_block dca_notifier
  };
  #endif
  
 +#ifdef CONFIG_PCI_IOV
 +static unsigned int max_vfs;
 +module_param(max_vfs, uint, 0);
 +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
 +                 "per physical function");
 +#endif /* CONFIG_PCI_IOV */
 +
  MODULE_AUTHOR("Intel Corporation, <[email protected]>");
  MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
  MODULE_LICENSE("GPL");
@@@ -139,41 -131,6 +139,41 @@@ MODULE_VERSION(DRV_VERSION)
  
  #define DEFAULT_DEBUG_LEVEL_SHIFT 3
  
 +static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 gcr;
 +      u32 gpie;
 +      u32 vmdctl;
 +
 +#ifdef CONFIG_PCI_IOV
 +      /* disable iov and allow time for transactions to clear */
 +      pci_disable_sriov(adapter->pdev);
 +#endif
 +
 +      /* turn off device IOV mode */
 +      gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
 +      gcr &= ~(IXGBE_GCR_EXT_SRIOV);
 +      IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
 +      gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 +      gpie &= ~IXGBE_GPIE_VTMODE_MASK;
 +      IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 +
 +      /* set default pool back to 0 */
 +      vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 +      vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
 +      IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
 +
 +      /* take a breather then clean up driver data */
 +      msleep(100);
 +      if (adapter->vfinfo)
 +              kfree(adapter->vfinfo);
 +      adapter->vfinfo = NULL;
 +
 +      adapter->num_vfs = 0;
 +      adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 +}
 +
  static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
  {
        u32 ctrl_ext;
@@@ -494,7 -451,7 +494,7 @@@ static void ixgbe_update_rx_dca(struct 
  {
        u32 rxctrl;
        int cpu = get_cpu();
 -      int q = rx_ring - adapter->rx_ring;
 +      int q = rx_ring->reg_idx;
  
        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@@ -522,7 -479,7 +522,7 @@@ static void ixgbe_update_tx_dca(struct 
  {
        u32 txctrl;
        int cpu = get_cpu();
 -      int q = tx_ring - adapter->tx_ring;
 +      int q = tx_ring->reg_idx;
        struct ixgbe_hw *hw = &adapter->hw;
  
        if (tx_ring->cpu != cpu) {
@@@ -556,12 -513,12 +556,12 @@@ static void ixgbe_setup_dca(struct ixgb
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              adapter->tx_ring[i].cpu = -1;
 -              ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
 +              adapter->tx_ring[i]->cpu = -1;
 +              ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              adapter->rx_ring[i].cpu = -1;
 -              ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
 +              adapter->rx_ring[i]->cpu = -1;
 +              ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
        }
  }
  
@@@ -1032,7 -989,7 +1032,7 @@@ static void ixgbe_configure_msix(struc
                                       adapter->num_rx_queues);
  
                for (i = 0; i < q_vector->rxr_count; i++) {
 -                      j = adapter->rx_ring[r_idx].reg_idx;
 +                      j = adapter->rx_ring[r_idx]->reg_idx;
                        ixgbe_set_ivar(adapter, 0, j, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                       adapter->num_tx_queues);
  
                for (i = 0; i < q_vector->txr_count; i++) {
 -                      j = adapter->tx_ring[r_idx].reg_idx;
 +                      j = adapter->tx_ring[r_idx]->reg_idx;
                        ixgbe_set_ivar(adapter, 1, j, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
  
        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
 -      mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
 +      if (adapter->num_vfs)
 +              mask &= ~(IXGBE_EIMS_OTHER |
 +                        IXGBE_EIMS_MAILBOX |
 +                        IXGBE_EIMS_LSC);
 +      else
 +              mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
  }
  
@@@ -1182,7 -1134,7 +1182,7 @@@ static void ixgbe_set_itr_msix(struct i
  
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
 -              tx_ring = &(adapter->tx_ring[r_idx]);
 +              tx_ring = adapter->tx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
  
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
 -              rx_ring = &(adapter->rx_ring[r_idx]);
 +              rx_ring = adapter->rx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
@@@ -1302,9 -1254,6 +1302,9 @@@ static irqreturn_t ixgbe_msix_lsc(int i
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
  
 +      if (eicr & IXGBE_EICR_MAILBOX)
 +              ixgbe_msg_task(adapter);
 +
        if (hw->mac.type == ixgbe_mac_82598EB)
                ixgbe_check_fan_failure(adapter, eicr);
  
                        netif_tx_stop_all_queues(netdev);
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
 -                                                         &adapter->tx_ring[i];
 +                                                          adapter->tx_ring[i];
                                if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
                                                       &tx_ring->reinit_state))
                                        schedule_work(&adapter->fdir_reinit_task);
@@@ -1378,7 -1327,7 +1378,7 @@@ static irqreturn_t ixgbe_msix_clean_tx(
  
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
 -              tx_ring = &(adapter->tx_ring[r_idx]);
 +              tx_ring = adapter->tx_ring[r_idx];
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@@ -1406,7 -1355,7 +1406,7 @@@ static irqreturn_t ixgbe_msix_clean_rx(
  
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0;  i < q_vector->rxr_count; i++) {
 -              rx_ring = &(adapter->rx_ring[r_idx]);
 +              rx_ring = adapter->rx_ring[r_idx];
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@@ -1436,7 -1385,7 +1436,7 @@@ static irqreturn_t ixgbe_msix_clean_man
  
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
 -              ring = &(adapter->tx_ring[r_idx]);
 +              ring = adapter->tx_ring[r_idx];
                ring->total_bytes = 0;
                ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
  
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
 -              ring = &(adapter->rx_ring[r_idx]);
 +              ring = adapter->rx_ring[r_idx];
                ring->total_bytes = 0;
                ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@@ -1476,7 -1425,7 +1476,7 @@@ static int ixgbe_clean_rxonly(struct na
        long r_idx;
  
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      rx_ring = &(adapter->rx_ring[r_idx]);
 +      rx_ring = adapter->rx_ring[r_idx];
  #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_rx_dca(adapter, rx_ring);
@@@ -1517,7 -1466,7 +1517,7 @@@ static int ixgbe_clean_rxtx_many(struc
  
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
 -              ring = &(adapter->tx_ring[r_idx]);
 +              ring = adapter->tx_ring[r_idx];
  #ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_tx_dca(adapter, ring);
        budget = max(budget, 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
 -              ring = &(adapter->rx_ring[r_idx]);
 +              ring = adapter->rx_ring[r_idx];
  #ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_rx_dca(adapter, ring);
        }
  
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 -      ring = &(adapter->rx_ring[r_idx]);
 +      ring = adapter->rx_ring[r_idx];
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                napi_complete(napi);
@@@ -1577,7 -1526,7 +1577,7 @@@ static int ixgbe_clean_txonly(struct na
        long r_idx;
  
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 -      tx_ring = &(adapter->tx_ring[r_idx]);
 +      tx_ring = adapter->tx_ring[r_idx];
  #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_tx_dca(adapter, tx_ring);
@@@ -1762,8 -1711,8 +1762,8 @@@ static void ixgbe_set_itr(struct ixgbe_
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
 -      struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
 -      struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
 +      struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 +      struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
  
        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
@@@ -1819,8 -1768,6 +1819,8 @@@ static inline void ixgbe_irq_enable(str
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
 +              if (adapter->num_vfs)
 +                      mask |= IXGBE_EIMS_MAILBOX;
        }
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
        ixgbe_irq_enable_queues(adapter, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
 +
 +      if (adapter->num_vfs > 32) {
 +              u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
 +              IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
 +      }
  }
  
  /**
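
The EITRSEL write in the hunk above is plain mask arithmetic: the first 32 VFs are covered elsewhere, and (1 << (num_vfs - 32)) - 1 sets one low bit per VF beyond the first 32 in the second 32-bit register. A worked example under an assumed VF count:

#include <stdio.h>

int main(void)
{
	unsigned int num_vfs = 40;	/* assumed example value, must be > 32 */
	/* low (num_vfs - 32) bits set: covers VFs 32..39 here */
	unsigned int eitrsel = (1u << (num_vfs - 32)) - 1;

	printf("EITRSEL for %u VFs: 0x%02x\n", num_vfs, eitrsel);	/* prints 0xff */
	return 0;
}
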
@@@ -1875,10 -1817,10 +1875,10 @@@ static irqreturn_t ixgbe_intr(int irq, 
        ixgbe_check_fan_failure(adapter, eicr);
  
        if (napi_schedule_prep(&(q_vector->napi))) {
 -              adapter->tx_ring[0].total_packets = 0;
 -              adapter->tx_ring[0].total_bytes = 0;
 -              adapter->rx_ring[0].total_packets = 0;
 -              adapter->rx_ring[0].total_bytes = 0;
 +              adapter->tx_ring[0]->total_packets = 0;
 +              adapter->tx_ring[0]->total_bytes = 0;
 +              adapter->rx_ring[0]->total_packets = 0;
 +              adapter->rx_ring[0]->total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
                __napi_schedule(&(q_vector->napi));
        }
@@@ -1963,8 -1905,6 +1963,8 @@@ static inline void ixgbe_irq_disable(st
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
 +              if (adapter->num_vfs > 32)
 +                      IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@@ -2010,7 -1950,7 +2010,7 @@@ static void ixgbe_configure_tx(struct i
  
        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              struct ixgbe_ring *ring = &adapter->tx_ring[i];
 +              struct ixgbe_ring *ring = adapter->tx_ring[i];
                j = ring->reg_idx;
                tdba = ring->dma;
                tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
 -              adapter->tx_ring[i].head = IXGBE_TDH(j);
 -              adapter->tx_ring[i].tail = IXGBE_TDT(j);
 +              adapter->tx_ring[i]->head = IXGBE_TDH(j);
 +              adapter->tx_ring[i]->tail = IXGBE_TDT(j);
                /*
                 * Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
  
        if (hw->mac.type == ixgbe_mac_82599EB) {
                u32 rttdcs;
 +              u32 mask;
  
                /* disable the arbiter while setting MTQC */
                rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
                rttdcs |= IXGBE_RTTDCS_ARBDIS;
                IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
  
 -              /* We enable 8 traffic classes, DCB only */
 -              if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
 -                      IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
 -                                      IXGBE_MTQC_8TC_8TQ));
 -              else
 +              /* set transmit pool layout */
 +              mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
 +              switch (adapter->flags & mask) {
 +
 +              case (IXGBE_FLAG_SRIOV_ENABLED):
 +                      IXGBE_WRITE_REG(hw, IXGBE_MTQC,
 +                                      (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
 +                      break;
 +
 +              case (IXGBE_FLAG_DCB_ENABLED):
 +                      /* We enable 8 traffic classes, DCB only */
 +                      IXGBE_WRITE_REG(hw, IXGBE_MTQC,
 +                                    (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
 +                      break;
 +
 +              default:
                        IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
 +                      break;
 +              }
  
                /* re-eable the arbiter */
                rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@@ -2133,16 -2059,12 +2133,16 @@@ static u32 ixgbe_setup_mrqc(struct ixgb
  #ifdef CONFIG_IXGBE_DCB
                                 | IXGBE_FLAG_DCB_ENABLED
  #endif
 +                               | IXGBE_FLAG_SRIOV_ENABLED
                                );
  
        switch (mask) {
        case (IXGBE_FLAG_RSS_ENABLED):
                mrqc = IXGBE_MRQC_RSSEN;
                break;
 +      case (IXGBE_FLAG_SRIOV_ENABLED):
 +              mrqc = IXGBE_MRQC_VMDQEN;
 +              break;
  #ifdef CONFIG_IXGBE_DCB
        case (IXGBE_FLAG_DCB_ENABLED):
                mrqc = IXGBE_MRQC_RT8TCEN;
@@@ -2168,7 -2090,7 +2168,7 @@@ static void ixgbe_configure_rscctl(stru
        u32 rscctrl;
        int rx_buf_len;
  
 -      rx_ring = &adapter->rx_ring[index];
 +      rx_ring = adapter->rx_ring[index];
        j = rx_ring->reg_idx;
        rx_buf_len = rx_ring->rx_buf_len;
        rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@@ -2223,9 -2145,7 +2223,9 @@@ static void ixgbe_configure_rx(struct i
        int rx_buf_len;
  
        /* Decide whether to use packet split mode or not */
 -      adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 +      /* Do not use packet split if we're in SR-IOV Mode */
 +      if (!adapter->num_vfs)
 +              adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
  
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                                      IXGBE_PSRTYPE_IPV4HDR |
                                      IXGBE_PSRTYPE_IPV6HDR |
                                      IXGBE_PSRTYPE_L2HDR;
 -                      IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
 +                      IXGBE_WRITE_REG(hw,
 +                                      IXGBE_PSRTYPE(adapter->num_vfs),
 +                                      psrtype);
                }
        } else {
                if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
  #endif
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
  
 -      rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
 +      rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
         * the Base and Length of the Rx Descriptor Ring
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              rx_ring = &adapter->rx_ring[i];
 +              rx_ring = adapter->rx_ring[i];
                rdba = rx_ring->dma;
                j = rx_ring->reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }
  
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 +              u32 vt_reg_bits;
 +              u32 reg_offset, vf_shift;
 +              u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 +              vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
 +                      | IXGBE_VT_CTL_REPLEN;
 +              vt_reg_bits |= (adapter->num_vfs <<
 +                              IXGBE_VT_CTL_POOL_SHIFT);
 +              IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
 +              IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
 +
 +              vf_shift = adapter->num_vfs % 32;
 +              reg_offset = adapter->num_vfs / 32;
 +              IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
 +              IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
 +              IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
 +              IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
 +              /* Enable only the PF's pool for Tx/Rx */
 +              IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
 +              IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
 +              IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 +              ixgbe_set_vmolr(hw, adapter->num_vfs);
 +      }
 +
        /* Program MRQC for the distribution of queues */
        mrqc = ixgbe_setup_mrqc(adapter);
  
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  
 +      if (adapter->num_vfs) {
 +              u32 reg;
 +
 +              /* Map PF MAC address in RAR Entry 0 to first pool
 +               * following VFs */
 +              hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
 +
 +              /* Set up VF register offsets for selected VT Mode, i.e.
 +               * 64 VFs for SR-IOV */
 +              reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
 +              reg |= IXGBE_GCR_EXT_SRIOV;
 +              IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
 +      }
 +
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
  
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
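
The VFRE/VFTE writes in the hunk above locate the PF's enable bit by splitting a pool index across a pair of 32-bit registers: reg_offset = index / 32 picks the register, vf_shift = index % 32 the bit. The PF's pool sits just above the VF pools, so its index equals num_vfs; with 40 VFs, for instance, that is register 1, bit 8. A small sketch of the same arithmetic, with an illustrative helper name:

/* Locate the enable bit for a pool across a pair of 32-bit registers. */
static void pool_enable_bit(unsigned int pool,
			    unsigned int *reg_offset, unsigned int *vf_shift)
{
	*reg_offset = pool / 32;	/* which VFRE/VFTE register */
	*vf_shift = pool % 32;		/* bit within that register */
}
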
@@@ -2432,17 -2312,15 +2432,17 @@@ static void ixgbe_vlan_rx_add_vid(struc
  {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 +      int pool_ndx = adapter->num_vfs;
  
        /* add VID to filter table */
 -      hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
 +      hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
  }
  
  static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
  {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 +      int pool_ndx = adapter->num_vfs;
  
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_disable(adapter);
                ixgbe_irq_enable(adapter);
  
        /* remove VID from filter table */
 -      hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
 +      hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
  }
  
  static void ixgbe_vlan_rx_register(struct net_device *netdev,
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        u32 ctrl;
 -                      j = adapter->rx_ring[i].reg_idx;
 +                      j = adapter->rx_ring[i]->reg_idx;
                        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
                        ctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@@ -2536,7 -2414,7 +2536,7 @@@ static u8 *ixgbe_addr_list_itr(struct i
   * responsible for configuring the hardware for proper unicast, multicast and
   * promiscuous mode.
   **/
 -static void ixgbe_set_rx_mode(struct net_device *netdev)
 +void ixgbe_set_rx_mode(struct net_device *netdev)
  {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  
        /* reprogram secondary unicast list */
 -      hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
 +      hw->mac.ops.update_uc_addr_list(hw, netdev);
  
        /* reprogram multicast list */
 -      addr_count = netdev->mc_count;
 +      addr_count = netdev_mc_count(netdev);
        if (addr_count)
                addr_list = netdev->mc_list->dmi_addr;
        hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
                                        ixgbe_addr_list_itr);
 +      if (adapter->num_vfs)
 +              ixgbe_restore_vf_multicasts(adapter);
  }
  
  static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
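
The set_rx_mode() hunk above tracks a net-2.6 API shift: drivers read the multicast count through the netdev_mc_count() accessor rather than netdev->mc_count, and update_uc_addr_list() now takes the net_device itself. A hedged sketch of the accessor in use; the helper is illustrative:

#include <linux/netdevice.h>

/* True if the device has any multicast addresses programmed. */
static bool dev_has_mc(struct net_device *netdev)
{
	return netdev_mc_count(netdev) != 0;
}
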
@@@ -2646,7 -2522,7 +2646,7 @@@ static void ixgbe_configure_dcb(struct 
        ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              j = adapter->tx_ring[i].reg_idx;
 +              j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                /* PThresh workaround for Tx hang with DFP enabled. */
                txdctl |= 32;
                vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                for (i = 0; i < adapter->num_rx_queues; i++) {
 -                      j = adapter->rx_ring[i].reg_idx;
 +                      j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                        vlnctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@@ -2703,7 -2579,7 +2703,7 @@@ static void ixgbe_configure(struct ixgb
  #endif /* IXGBE_FCOE */
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
                for (i = 0; i < adapter->num_tx_queues; i++)
 -                      adapter->tx_ring[i].atr_sample_rate =
 +                      adapter->tx_ring[i]->atr_sample_rate =
                                                       adapter->atr_sample_rate;
                ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
        } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
 -                                     (adapter->rx_ring[i].count - 1));
 +              ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
 +                                     (adapter->rx_ring[i]->count - 1));
  }
  
  static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@@ -2797,7 -2673,7 +2797,7 @@@ link_cfg_out
  static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                              int rxr)
  {
 -      int j = adapter->rx_ring[rxr].reg_idx;
 +      int j = adapter->rx_ring[rxr]->reg_idx;
        int k;
  
        for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
                DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
                        "not set within the polling period\n", rxr);
        }
 -      ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
 -                            (adapter->rx_ring[rxr].count - 1));
 +      ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
 +                            (adapter->rx_ring[rxr]->count - 1));
  }
  
  static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        u32 txdctl, rxdctl, mhadd;
        u32 dmatxctl;
        u32 gpie;
 +      u32 ctrl_ext;
  
        ixgbe_get_hw_control(adapter);
  
                        /* MSI only */
                        gpie = 0;
                }
 +              if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 +                      gpie &= ~IXGBE_GPIE_VTMODE_MASK;
 +                      gpie |= IXGBE_GPIE_VTMODE_64;
 +              }
                /* XXX: to interrupt immediately for EICS writes, enable this */
                /* gpie |= IXGBE_GPIE_EIMEN; */
                IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
        }
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              j = adapter->tx_ring[i].reg_idx;
 +              j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                /* enable WTHRESH=8 descriptors, to encourage burst writeback */
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        }
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              j = adapter->tx_ring[i].reg_idx;
 +              j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 +              if (hw->mac.type == ixgbe_mac_82599EB) {
 +                      int wait_loop = 10;
 +                      /* poll for Tx Enable ready */
 +                      do {
 +                              msleep(1);
 +                              txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 +                      } while (--wait_loop &&
 +                               !(txdctl & IXGBE_TXDCTL_ENABLE));
 +                      if (!wait_loop)
 +                              DPRINTK(DRV, ERR, "Could not enable "
 +                                      "Tx Queue %d\n", j);
 +              }
        }
  
        for (i = 0; i < num_rx_rings; i++) {
 -              j = adapter->rx_ring[i].reg_idx;
 +              j = adapter->rx_ring[i]->reg_idx;
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                /* enable PTHRESH=32 descriptors (half the internal cache)
                 * and HTHRESH=0 descriptors (to minimize latency on fetch),
  
        for (i = 0; i < adapter->num_tx_queues; i++)
                set_bit(__IXGBE_FDIR_INIT_DONE,
 -                      &(adapter->tx_ring[i].reinit_state));
 +                      &(adapter->tx_ring[i]->reinit_state));
  
        /* enable transmits */
        netif_tx_start_all_queues(netdev);
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
 +
 +      /* Set PF Reset Done bit so PF/VF Mail Ops can work */
 +      ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 +      ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
 +      IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
 +
        return 0;
  }
  
@@@ -3070,8 -2923,7 +3070,8 @@@ void ixgbe_reset(struct ixgbe_adapter *
        }
  
        /* reprogram the RAR[0] in case user changed it. */
 -      hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 +      hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
 +                          IXGBE_RAH_AV);
  }
  
  /**
@@@ -3177,7 -3029,7 +3177,7 @@@ static void ixgbe_clean_all_rx_rings(st
        int i;
  
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 +              ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
  }
  
  /**
@@@ -3189,7 -3041,7 +3189,7 @@@ static void ixgbe_clean_all_tx_rings(st
        int i;
  
        for (i = 0; i < adapter->num_tx_queues; i++)
 -              ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 +              ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
  }
  
  void ixgbe_down(struct ixgbe_adapter *adapter)
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);
  
 +      /* disable receive for all VFs and wait one second */
 +      if (adapter->num_vfs) {
 +              for (i = 0 ; i < adapter->num_vfs; i++)
 +                      adapter->vfinfo[i].clear_to_send = 0;
 +
 +              /* ping all the active vfs to let them know we are going down */
 +              ixgbe_ping_all_vfs(adapter);
 +              /* Disable all VFTE/VFRE TX/RX */
 +              ixgbe_disable_tx_rx(adapter);
 +      }
 +
        /* disable receives */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
  
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              j = adapter->tx_ring[i].reg_idx;
 +              j = adapter->tx_ring[i]->reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
  
        netif_carrier_off(netdev);
  
 +      /* clear n-tuple filters that are cached */
 +      ethtool_ntuple_flush(netdev);
 +
        if (!pci_channel_offline(adapter->pdev))
                ixgbe_reset(adapter);
        ixgbe_clean_all_tx_rings(adapter);
@@@ -3283,13 -3121,13 +3283,13 @@@ static int ixgbe_poll(struct napi_struc
  
  #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 -              ixgbe_update_tx_dca(adapter, adapter->tx_ring);
 -              ixgbe_update_rx_dca(adapter, adapter->rx_ring);
 +              ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
 +              ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
        }
  #endif
  
 -      tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
 -      ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
 +      tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
 +      ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
  
        if (!tx_clean_complete)
                work_done = budget;
@@@ -3453,19 -3291,6 +3453,19 @@@ static inline bool ixgbe_set_fcoe_queue
  }
  
  #endif /* IXGBE_FCOE */
 +/**
 + * ixgbe_set_sriov_queues: Allocate queues for IOV use
 + * @adapter: board private structure to initialize
 + *
 + * IOV doesn't actually use anything, so just NAK the
 + * request for now and let the other queue routines
 + * figure out what to do.
 + */
 +static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 +{
 +      return false;
 +}
 +
  /*
   * ixgbe_set_num_queues: Allocate queues for device, feature dependent
   * @adapter: board private structure to initialize
   **/
  static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
  {
 +      /* Start with base case */
 +      adapter->num_rx_queues = 1;
 +      adapter->num_tx_queues = 1;
 +      adapter->num_rx_pools = adapter->num_rx_queues;
 +      adapter->num_rx_queues_per_pool = 1;
 +
 +      if (ixgbe_set_sriov_queues(adapter))
 +              return;
 +
  #ifdef IXGBE_FCOE
        if (ixgbe_set_fcoe_queues(adapter))
                goto done;
@@@ -3577,9 -3393,9 +3577,9 @@@ static inline bool ixgbe_cache_ring_rss
  
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                for (i = 0; i < adapter->num_rx_queues; i++)
 -                      adapter->rx_ring[i].reg_idx = i;
 +                      adapter->rx_ring[i]->reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
 -                      adapter->tx_ring[i].reg_idx = i;
 +                      adapter->tx_ring[i]->reg_idx = i;
                ret = true;
        } else {
                ret = false;
@@@ -3606,8 -3422,8 +3606,8 @@@ static inline bool ixgbe_cache_ring_dcb
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        /* the number of queues is assumed to be symmetric */
                        for (i = 0; i < dcb_i; i++) {
 -                              adapter->rx_ring[i].reg_idx = i << 3;
 -                              adapter->tx_ring[i].reg_idx = i << 2;
 +                              adapter->rx_ring[i]->reg_idx = i << 3;
 +                              adapter->tx_ring[i]->reg_idx = i << 2;
                        }
                        ret = true;
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                                 * Rx TC0-TC7 are offset by 16 queues each
                                 */
                                for (i = 0; i < 3; i++) {
 -                                      adapter->tx_ring[i].reg_idx = i << 5;
 -                                      adapter->rx_ring[i].reg_idx = i << 4;
 +                                      adapter->tx_ring[i]->reg_idx = i << 5;
 +                                      adapter->rx_ring[i]->reg_idx = i << 4;
                                }
                                for ( ; i < 5; i++) {
 -                                      adapter->tx_ring[i].reg_idx =
 +                                      adapter->tx_ring[i]->reg_idx =
                                                                 ((i + 2) << 4);
 -                                      adapter->rx_ring[i].reg_idx = i << 4;
 +                                      adapter->rx_ring[i]->reg_idx = i << 4;
                                }
                                for ( ; i < dcb_i; i++) {
 -                                      adapter->tx_ring[i].reg_idx =
 +                                      adapter->tx_ring[i]->reg_idx =
                                                                 ((i + 8) << 3);
 -                                      adapter->rx_ring[i].reg_idx = i << 4;
 +                                      adapter->rx_ring[i]->reg_idx = i << 4;
                                }
  
                                ret = true;
                                 *
                                 * Rx TC0-TC3 are offset by 32 queues each
                                 */
 -                              adapter->tx_ring[0].reg_idx = 0;
 -                              adapter->tx_ring[1].reg_idx = 64;
 -                              adapter->tx_ring[2].reg_idx = 96;
 -                              adapter->tx_ring[3].reg_idx = 112;
 +                              adapter->tx_ring[0]->reg_idx = 0;
 +                              adapter->tx_ring[1]->reg_idx = 64;
 +                              adapter->tx_ring[2]->reg_idx = 96;
 +                              adapter->tx_ring[3]->reg_idx = 112;
                                for (i = 0 ; i < dcb_i; i++)
 -                                      adapter->rx_ring[i].reg_idx = i << 5;
 +                                      adapter->rx_ring[i]->reg_idx = i << 5;
  
                                ret = true;
                        } else {
@@@ -3687,9 -3503,9 +3687,9 @@@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
            ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
             (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
                for (i = 0; i < adapter->num_rx_queues; i++)
 -                      adapter->rx_ring[i].reg_idx = i;
 +                      adapter->rx_ring[i]->reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
 -                      adapter->tx_ring[i].reg_idx = i;
 +                      adapter->tx_ring[i]->reg_idx = i;
                ret = true;
        }
  
@@@ -3717,8 -3533,8 +3717,8 @@@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
  
                        ixgbe_cache_ring_dcb(adapter);
                        /* find out queues in TC for FCoE */
 -                      fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
 -                      fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
 +                      fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
 +                      fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
                        /*
                         * In 82599, the number of Tx queues for each traffic
                         * class for both 8-TC and 4-TC modes are:
                        fcoe_tx_i = f->mask;
                }
                for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
 -                      adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
 -                      adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
 +                      adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
 +                      adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
                }
                ret = true;
        }
  }
  
  #endif /* IXGBE_FCOE */
 +/**
 + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 + * @adapter: board private structure to initialize
 + *
 + * SR-IOV doesn't use any descriptor rings but changes the default if
 + * no other mapping is used.
 + *
 + */
 +static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 +{
 +      adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
 +      adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
 +      if (adapter->num_vfs)
 +              return true;
 +      else
 +              return false;
 +}
 +
  /**
   * ixgbe_cache_ring_register - Descriptor ring to register mapping
   * @adapter: board private structure to initialize
  static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
  {
        /* start with default case */
 -      adapter->rx_ring[0].reg_idx = 0;
 -      adapter->tx_ring[0].reg_idx = 0;
 +      adapter->rx_ring[0]->reg_idx = 0;
 +      adapter->tx_ring[0]->reg_idx = 0;
 +
 +      if (ixgbe_cache_ring_sriov(adapter))
 +              return;
  
  #ifdef IXGBE_FCOE
        if (ixgbe_cache_ring_fcoe(adapter))
  static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
  {
        int i;
 -
 -      adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 -                                 sizeof(struct ixgbe_ring), GFP_KERNEL);
 -      if (!adapter->tx_ring)
 -              goto err_tx_ring_allocation;
 -
 -      adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 -                                 sizeof(struct ixgbe_ring), GFP_KERNEL);
 -      if (!adapter->rx_ring)
 -              goto err_rx_ring_allocation;
 +      int orig_node = adapter->node;
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              adapter->tx_ring[i].count = adapter->tx_ring_count;
 -              adapter->tx_ring[i].queue_index = i;
 +              struct ixgbe_ring *ring = adapter->tx_ring[i];
 +              if (orig_node == -1) {
 +                      int cur_node = next_online_node(adapter->node);
 +                      if (cur_node == MAX_NUMNODES)
 +                              cur_node = first_online_node;
 +                      adapter->node = cur_node;
 +              }
 +              ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
 +                                  adapter->node);
 +              if (!ring)
 +                      ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
 +              if (!ring)
 +                      goto err_tx_ring_allocation;
 +              ring->count = adapter->tx_ring_count;
 +              ring->queue_index = i;
 +              ring->numa_node = adapter->node;
 +
 +              adapter->tx_ring[i] = ring;
        }
  
 +      /* Restore the adapter's original node */
 +      adapter->node = orig_node;
 +
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              adapter->rx_ring[i].count = adapter->rx_ring_count;
 -              adapter->rx_ring[i].queue_index = i;
 +              struct ixgbe_ring *ring = adapter->rx_ring[i];
 +              if (orig_node == -1) {
 +                      int cur_node = next_online_node(adapter->node);
 +                      if (cur_node == MAX_NUMNODES)
 +                              cur_node = first_online_node;
 +                      adapter->node = cur_node;
 +              }
 +              ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
 +                                  adapter->node);
 +              if (!ring)
 +                      ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
 +              if (!ring)
 +                      goto err_rx_ring_allocation;
 +              ring->count = adapter->rx_ring_count;
 +              ring->queue_index = i;
 +              ring->numa_node = adapter->node;
 +
 +              adapter->rx_ring[i] = ring;
        }
  
 +      /* Restore the adapter's original node */
 +      adapter->node = orig_node;
 +
        ixgbe_cache_ring_register(adapter);
  
        return 0;
  
  err_rx_ring_allocation:
 -      kfree(adapter->tx_ring);
 +      for (i = 0; i < adapter->num_tx_queues; i++)
 +              kfree(adapter->tx_ring[i]);
  err_tx_ring_allocation:
        return -ENOMEM;
  }
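
The ring allocation above follows a prefer-local NUMA pattern: try kzalloc_node() on the adapter's node first, then fall back to plain kzalloc() anywhere, on the theory that remote memory beats a failed allocation. A condensed sketch; the wrapper name is illustrative:

#include <linux/slab.h>

/* Prefer memory on @node; fall back to any node rather than fail. */
static void *alloc_ring_on_node(size_t size, int node)
{
	void *ring = kzalloc_node(size, GFP_KERNEL, node);

	if (!ring)
		ring = kzalloc(size, GFP_KERNEL);
	return ring;
}
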
@@@ -3935,9 -3700,6 +3935,9 @@@ static int ixgbe_set_interrupt_capabili
        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
        adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
        adapter->atr_sample_rate = 0;
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              ixgbe_disable_sriov(adapter);
 +
        ixgbe_set_num_queues(adapter);
  
        err = pci_enable_msi(adapter->pdev);
@@@ -3979,11 -3741,7 +3979,11 @@@ static int ixgbe_alloc_q_vectors(struc
        }
  
        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
 -              q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
 +              q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
 +                                      GFP_KERNEL, adapter->node);
 +              if (!q_vector)
 +                      q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
 +                                         GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
@@@ -4110,16 -3868,10 +4110,16 @@@ err_set_interrupt
   **/
  void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
  {
 -      kfree(adapter->tx_ring);
 -      kfree(adapter->rx_ring);
 -      adapter->tx_ring = NULL;
 -      adapter->rx_ring = NULL;
 +      int i;
 +
 +      for (i = 0; i < adapter->num_tx_queues; i++) {
 +              kfree(adapter->tx_ring[i]);
 +              adapter->tx_ring[i] = NULL;
 +      }
 +      for (i = 0; i < adapter->num_rx_queues; i++) {
 +              kfree(adapter->rx_ring[i]);
 +              adapter->rx_ring[i] = NULL;
 +      }
  
        ixgbe_free_q_vectors(adapter);
        ixgbe_reset_interrupt_capability(adapter);
@@@ -4190,7 -3942,6 +4190,7 @@@ static int __devinit ixgbe_sw_init(stru
  {
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
 +      struct net_device *dev = adapter->netdev;
        unsigned int rss;
  #ifdef CONFIG_IXGBE_DCB
        int j;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 -              adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +              if (dev->features & NETIF_F_NTUPLE) {
 +                      /* Flow Director perfect filter enabled */
 +                      adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 +                      adapter->atr_sample_rate = 0;
 +                      spin_lock_init(&adapter->fdir_perfect_lock);
 +              } else {
 +                      /* Flow Director hash filters enabled */
 +                      adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 +                      adapter->atr_sample_rate = 20;
 +              }
                adapter->ring_feature[RING_F_FDIR].indices =
                                                         IXGBE_MAX_FDIR_INDICES;
 -              adapter->atr_sample_rate = 20;
                adapter->fdir_pballoc = 0;
  #ifdef IXGBE_FCOE
                adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
        /* enable rx csum by default */
        adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
  
 +      /* get assigned NUMA node */
 +      adapter->node = dev_to_node(&pdev->dev);
 +
        set_bit(__IXGBE_DOWN, &adapter->state);
  
        return 0;
@@@ -4320,9 -4060,7 +4320,9 @@@ int ixgbe_setup_tx_resources(struct ixg
        int size;
  
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 -      tx_ring->tx_buffer_info = vmalloc(size);
 +      tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
 +      if (!tx_ring->tx_buffer_info)
 +              tx_ring->tx_buffer_info = vmalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
        memset(tx_ring->tx_buffer_info, 0, size);
@@@ -4364,7 -4102,7 +4364,7 @@@ static int ixgbe_setup_all_tx_resources
        int i, err = 0;
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
 +              err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
                if (!err)
                        continue;
                DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
@@@ -4388,9 -4126,7 +4388,9 @@@ int ixgbe_setup_rx_resources(struct ixg
        int size;
  
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 -      rx_ring->rx_buffer_info = vmalloc(size);
 +      rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
 +      if (!rx_ring->rx_buffer_info)
 +              rx_ring->rx_buffer_info = vmalloc(size);
        if (!rx_ring->rx_buffer_info) {
                DPRINTK(PROBE, ERR,
                        "vmalloc allocation failed for the rx desc ring\n");
@@@ -4436,7 -4172,7 +4436,7 @@@ static int ixgbe_setup_all_rx_resources
        int i, err = 0;
  
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
 +              err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
                if (!err)
                        continue;
                DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
@@@ -4479,8 -4215,8 +4479,8 @@@ static void ixgbe_free_all_tx_resources
        int i;
  
        for (i = 0; i < adapter->num_tx_queues; i++)
 -              if (adapter->tx_ring[i].desc)
 -                      ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
 +              if (adapter->tx_ring[i]->desc)
 +                      ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
  }
  
  /**
@@@ -4516,8 -4252,8 +4516,8 @@@ static void ixgbe_free_all_rx_resources
        int i;
  
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              if (adapter->rx_ring[i].desc)
 -                      ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
 +              if (adapter->rx_ring[i]->desc)
 +                      ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
  }
  
  /**
@@@ -4794,8 -4530,8 +4794,8 @@@ void ixgbe_update_stats(struct ixgbe_ad
                        adapter->hw_rx_no_dma_resources +=
                                             IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                for (i = 0; i < adapter->num_rx_queues; i++) {
 -                      rsc_count += adapter->rx_ring[i].rsc_count;
 -                      rsc_flush += adapter->rx_ring[i].rsc_flush;
 +                      rsc_count += adapter->rx_ring[i]->rsc_count;
 +                      rsc_flush += adapter->rx_ring[i]->rsc_flush;
                }
                adapter->rsc_total_count = rsc_count;
                adapter->rsc_total_flush = rsc_flush;
  
        /* gather some stats to the adapter struct that are per queue */
        for (i = 0; i < adapter->num_tx_queues; i++)
 -              restart_queue += adapter->tx_ring[i].restart_queue;
 +              restart_queue += adapter->tx_ring[i]->restart_queue;
        adapter->restart_queue = restart_queue;
  
        for (i = 0; i < adapter->num_rx_queues; i++)
 -              non_eop_descs += adapter->rx_ring[i].non_eop_descs;
 +              non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
        adapter->non_eop_descs = non_eop_descs;
  
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@@ -5046,7 -4782,7 +5046,7 @@@ static void ixgbe_fdir_reinit_task(stru
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        set_bit(__IXGBE_FDIR_INIT_DONE,
 -                              &(adapter->tx_ring[i].reinit_state));
 +                              &(adapter->tx_ring[i]->reinit_state));
        } else {
                DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
                        "ignored adding FDIR ATR filters \n");
        netif_tx_start_all_queues(adapter->netdev);
  }
  
 +static DEFINE_MUTEX(ixgbe_watchdog_lock);
 +
  /**
   * ixgbe_watchdog_task - worker thread to bring link up
   * @work: pointer to work_struct containing our data
@@@ -5068,16 -4802,13 +5068,16 @@@ static void ixgbe_watchdog_task(struct 
                                                     watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
 -      u32 link_speed = adapter->link_speed;
 -      bool link_up = adapter->link_up;
 +      u32 link_speed;
 +      bool link_up;
        int i;
        struct ixgbe_ring *tx_ring;
        int some_tx_pending = 0;
  
 -      adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 +      mutex_lock(&ixgbe_watchdog_lock);
 +
 +      link_up = adapter->link_up;
 +      link_speed = adapter->link_speed;
  
        if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
  
        if (!netif_carrier_ok(netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++) {
 -                      tx_ring = &adapter->tx_ring[i];
 +                      tx_ring = adapter->tx_ring[i];
                        if (tx_ring->next_to_use != tx_ring->next_to_clean) {
                                some_tx_pending = 1;
                                break;
        }
  
        ixgbe_update_stats(adapter);
 -      adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
 +      mutex_unlock(&ixgbe_watchdog_lock);
  }
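
The watchdog rework above replaces the IXGBE_FLAG_IN_WATCHDOG_TASK guard flag with a real mutex and samples link_up/link_speed only after the lock is taken. A compressed sketch of the resulting shape, assuming kernel context (<linux/mutex.h>, <linux/workqueue.h>); struct my_adapter is illustrative, not the driver's real type:

    struct my_adapter {
            struct work_struct watchdog_task;
            bool link_up;
            u32 link_speed;
    };

    static DEFINE_MUTEX(watchdog_lock);

    static void watchdog_task_fn(struct work_struct *work)
    {
            struct my_adapter *ad = container_of(work, struct my_adapter,
                                                 watchdog_task);
            bool link_up;
            u32 link_speed;

            mutex_lock(&watchdog_lock);
            /* sample both fields under the lock so two queued invocations
             * can never interleave and act on a torn pair */
            link_up = ad->link_up;
            link_speed = ad->link_speed;
            /* ... link handling elided ... */
            (void)link_up;
            (void)link_speed;
            mutex_unlock(&watchdog_lock);
    }
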
  
  static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@@ -5650,7 -5381,7 +5650,7 @@@ static netdev_tx_t ixgbe_xmit_frame(str
                }
        }
  
 -      tx_ring = &adapter->tx_ring[skb->queue_mapping];
 +      tx_ring = adapter->tx_ring[skb->queue_mapping];
  
        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
            (skb->protocol == htons(ETH_P_FCOE))) {
@@@ -5756,8 -5487,7 +5756,8 @@@ static int ixgbe_set_mac(struct net_dev
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
  
 -      hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 +      hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
 +                          IXGBE_RAH_AV);
  
        return 0;
  }
@@@ -5894,61 -5624,6 +5894,61 @@@ static const struct net_device_ops ixgb
  #endif /* IXGBE_FCOE */
  };
  
 +static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
 +                         const struct ixgbe_info *ii)
 +{
 +#ifdef CONFIG_PCI_IOV
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int err;
 +
 +      if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
 +              return;
 +
 +      /* The 82599 supports up to 64 VFs per physical function
 +       * but this implementation limits allocation to 63 so that
 +       * basic networking resources are still available to the
 +       * physical function
 +       */
 +      adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
 +      adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
 +      err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 +      if (err) {
 +              DPRINTK(PROBE, ERR,
 +                      "Failed to enable PCI sriov: %d\n", err);
 +              goto err_novfs;
 +      }
 +      /* If the call to enable VFs succeeded, allocate memory
 +       * for the per-VF control structures.
 +       */
 +      adapter->vfinfo =
 +              kcalloc(adapter->num_vfs,
 +                      sizeof(struct vf_data_storage), GFP_KERNEL);
 +      if (adapter->vfinfo) {
 +              /* Now that we're sure SR-IOV is enabled
 +               * and memory is allocated, set up the mailbox parameters
 +               */
 +              ixgbe_init_mbx_params_pf(hw);
 +              memcpy(&hw->mbx.ops, ii->mbx_ops,
 +                     sizeof(hw->mbx.ops));
 +
 +              /* Disable RSC when in SR-IOV mode */
 +              adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
 +                                   IXGBE_FLAG2_RSC_ENABLED);
 +              return;
 +      }
 +
 +      /* Allocation failed; fall back to running without VFs */
 +      DPRINTK(PROBE, ERR,
 +              "Unable to allocate memory for VF "
 +              "Data Storage - SRIOV disabled\n");
 +      pci_disable_sriov(adapter->pdev);
 +
 +err_novfs:
 +      adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 +      adapter->num_vfs = 0;
 +#endif /* CONFIG_PCI_IOV */
 +}
 +
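
Note that the per-VF table above is sized with kcalloc(), which zero-fills and fails cleanly if num * size would overflow, unlike an open-coded kmalloc(n * size). A hedged sketch (kernel context with <linux/slab.h> assumed; struct vf_state is hypothetical):

    struct vf_state { int dummy; };

    static struct vf_state *alloc_vf_table(unsigned int num_vfs)
    {
            /* kcalloc(n, size, flags) rejects an overflowing n * size;
             * kmalloc(num_vfs * sizeof(...), ...) would silently wrap */
            return kcalloc(num_vfs, sizeof(struct vf_state), GFP_KERNEL);
    }
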
  /**
   * ixgbe_probe - Device Initialization Routine
   * @pdev: PCI device information struct
@@@ -6088,6 -5763,10 +6088,10 @@@ static int __devinit ixgbe_probe(struc
        if (err)
                goto err_sw_init;
  
+       /* Make it possible for the adapter to be woken up via WOL */
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
        /*
         * If there is a fan on this device and it has failed, log the
         * failure.
                goto err_sw_init;
        }
  
 +      ixgbe_probe_vf(adapter, ii);
 +
        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
  
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
 +                                  IXGBE_FLAG_DCB_ENABLED);
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
  
                ixgbe_setup_dca(adapter);
        }
  #endif
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 +              DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
 +                      adapter->num_vfs);
 +              for (i = 0; i < adapter->num_vfs; i++)
 +                      ixgbe_vf_configuration(pdev, (i | 0x10000000));
 +      }
 +
        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);
  
@@@ -6293,8 -5960,6 +6297,8 @@@ err_register
        ixgbe_clear_interrupt_scheme(adapter);
  err_sw_init:
  err_eeprom:
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              ixgbe_disable_sriov(adapter);
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->sfp_task);
@@@ -6363,9 -6028,6 +6367,9 @@@ static void __devexit ixgbe_remove(stru
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
  
 +      if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 +              ixgbe_disable_sriov(adapter);
 +
        ixgbe_clear_interrupt_scheme(adapter);
  
        ixgbe_release_hw_control(adapter);
diff --combined include/linux/pci.h
index 59a98e2ee2c69c5b32ec70e5cec2b4da210a3283,c1968f464c389fa5ad6529d1eaab7edf59400284..e2575f86133afb5d9a4c9328c5998ff04ba0b1ec
@@@ -612,9 -612,6 +612,9 @@@ extern void pci_remove_bus_device(struc
  extern void pci_stop_bus_device(struct pci_dev *dev);
  void pci_setup_cardbus(struct pci_bus *bus);
  extern void pci_sort_breadthfirst(void);
 +#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
 +#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
 +#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
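
These three helpers let bus-agnostic code query SR-IOV state from a bare struct device; the !CONFIG_PCI stubs further down keep such callers compiling. A plausible usage sketch (kernel context assumed; the function name is illustrative):

    /* Sketch: bus-agnostic VF query from generic networking code. */
    static unsigned int dev_total_vfs(struct net_device *netdev)
    {
            struct device *parent = netdev->dev.parent;

            if (parent && dev_is_pf(parent))
                    return dev_num_vf(parent); /* 0 when not a PCI PF */
            return 0;
    }
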
  
  /* Generic PCI functions exported to card drivers */
  
@@@ -759,6 -756,10 +759,10 @@@ pci_power_t pci_target_state(struct pci
  int pci_prepare_to_sleep(struct pci_dev *dev);
  int pci_back_from_sleep(struct pci_dev *dev);
  
+ /* For use by arch with custom probe code */
+ void set_pcie_port_type(struct pci_dev *pdev);
+ void set_pcie_hotplug_bridge(struct pci_dev *pdev);
  /* Functions for PCI Hotplug drivers to use */
  int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
  #ifdef CONFIG_HOTPLUG
@@@ -1132,9 -1133,6 +1136,9 @@@ static inline struct pci_dev *pci_get_b
                                                unsigned int devfn)
  { return NULL; }
  
 +#define dev_is_pci(d) (false)
 +#define dev_is_pf(d) (false)
 +#define dev_num_vf(d) (0)
  #endif /* CONFIG_PCI */
  
  /* Include architecture-dependent settings and functions */
@@@ -1292,7 -1290,6 +1296,7 @@@ void __iomem *pci_ioremap_bar(struct pc
  extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
  extern void pci_disable_sriov(struct pci_dev *dev);
  extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
 +extern int pci_num_vf(struct pci_dev *dev);
  #else
  static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
  {
@@@ -1305,10 -1302,6 +1309,10 @@@ static inline irqreturn_t pci_sriov_mig
  {
        return IRQ_NONE;
  }
 +static inline int pci_num_vf(struct pci_dev *dev)
 +{
 +      return 0;
 +}
  #endif
  
  #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
diff --combined net/core/ethtool.c
index 82cae3bca78ddc76864a3c5b536e4bf01c797112,236a9988ea91443ebee44141935239a267c2b027..d08a0c7675bf1fd6840837c0b7ec718b40375d93
@@@ -120,7 -120,7 +120,7 @@@ int ethtool_op_set_ufo(struct net_devic
   * NETIF_F_xxx values in include/linux/netdevice.h
   */
  static const u32 flags_dup_features =
 -      ETH_FLAG_LRO;
 +      (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
  
  u32 ethtool_op_get_flags(struct net_device *dev)
  {
  
  int ethtool_op_set_flags(struct net_device *dev, u32 data)
  {
 +      const struct ethtool_ops *ops = dev->ethtool_ops;
 +
        if (data & ETH_FLAG_LRO)
                dev->features |= NETIF_F_LRO;
        else
                dev->features &= ~NETIF_F_LRO;
  
 +      if (data & ETH_FLAG_NTUPLE) {
 +              if (!ops->set_rx_ntuple)
 +                      return -EOPNOTSUPP;
 +              dev->features |= NETIF_F_NTUPLE;
 +      } else {
 +              /* safe to clear regardless */
 +              dev->features &= ~NETIF_F_NTUPLE;
 +      }
 +
        return 0;
  }
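
With this change ETH_FLAG_NTUPLE can only be switched on when the driver actually implements set_rx_ntuple, so a driver opts in simply by filling that hook in its ethtool_ops. A hedged sketch of the driver side; the foo_* names are hypothetical:

    static int foo_set_rx_ntuple(struct net_device *dev,
                                 struct ethtool_rx_ntuple *cmd);

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_flags     = ethtool_op_get_flags,
            .set_flags     = ethtool_op_set_flags, /* rejects NTUPLE w/o hook */
            .set_rx_ntuple = foo_set_rx_ntuple,    /* enables ETH_FLAG_NTUPLE */
    };
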
  
 +void ethtool_ntuple_flush(struct net_device *dev)
 +{
 +      struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
 +
 +      list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
 +              list_del(&fsc->list);
 +              kfree(fsc);
 +      }
 +      dev->ethtool_ntuple_list.count = 0;
 +}
 +EXPORT_SYMBOL(ethtool_ntuple_flush);
 +
  /* Handlers for each ethtool command */
  
  static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
  {
 -      struct ethtool_cmd cmd = { ETHTOOL_GSET };
 +      struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
        int err;
  
        if (!dev->ethtool_ops->get_settings)
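
The on-stack command structs in this file switch from positional initializers like { ETHTOOL_GSET } to designated ones, which stay correct even if cmd ever stops being the first member; the remaining fields are zero-filled either way. A plain C99 illustration with a look-alike struct:

    struct cmd_buf {
            unsigned int cmd;
            unsigned int data;
    };

    struct cmd_buf a = { 1 };          /* positional: relies on member order */
    struct cmd_buf b = { .cmd = 1 };   /* designated: robust to reordering;
                                        * both zero-initialize .data */
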
@@@ -197,10 -174,7 +197,10 @@@ static int ethtool_set_settings(struct 
        return dev->ethtool_ops->set_settings(dev, &cmd);
  }
  
 -static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
 +/*
 + * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
 + */
 +static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
  {
        struct ethtool_drvinfo info;
        const struct ethtool_ops *ops = dev->ethtool_ops;
        return 0;
  }
  
 -static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
 +/*
 + * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
 + */
 +static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
  {
        struct ethtool_rxnfc cmd;
  
        return dev->ethtool_ops->set_rxnfc(dev, &cmd);
  }
  
 -static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
 +/*
 + * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
 + */
 +static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
  {
        struct ethtool_rxnfc info;
        const struct ethtool_ops *ops = dev->ethtool_ops;
@@@ -298,315 -266,6 +298,315 @@@ err_out
        return ret;
  }
  
 +static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
 +                              struct ethtool_rx_ntuple_flow_spec *spec,
 +                              struct ethtool_rx_ntuple_flow_spec_container *fsc)
 +{
 +
 +      /* don't add filters forever */
 +      if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
 +              /* free the container */
 +              kfree(fsc);
 +              return;
 +      }
 +
 +      /* Copy the whole filter over */
 +      fsc->fs.flow_type = spec->flow_type;
 +      memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
 +      memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
 +
 +      fsc->fs.vlan_tag = spec->vlan_tag;
 +      fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
 +      fsc->fs.data = spec->data;
 +      fsc->fs.data_mask = spec->data_mask;
 +      fsc->fs.action = spec->action;
 +
 +      /* add to the list */
 +      list_add_tail_rcu(&fsc->list, &list->list);
 +      list->count++;
 +}
 +
 +/*
 + * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
 + */
 +static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
 +{
 +      struct ethtool_rx_ntuple cmd;
 +      const struct ethtool_ops *ops = dev->ethtool_ops;
 +      struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
 +      int ret;
 +
 +      if (!(dev->features & NETIF_F_NTUPLE))
 +              return -EINVAL;
 +
 +      if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
 +              return -EFAULT;
 +
 +      /*
 +       * Cache filter in dev struct for GET operation only if
 +       * the underlying driver doesn't have its own GET operation, and
 +       * only if the filter was added successfully.  First make sure we
 +       * can allocate the filter, then continue if successful.
 +       */
 +      if (!ops->get_rx_ntuple) {
 +              fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
 +              if (!fsc)
 +                      return -ENOMEM;
 +      }
 +
 +      ret = ops->set_rx_ntuple(dev, &cmd);
 +      if (ret) {
 +              kfree(fsc);
 +              return ret;
 +      }
 +
 +      if (!ops->get_rx_ntuple)
 +              __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
 +
 +      return ret;
 +}
 +
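
Userspace reaches the new ETHTOOL_SRXNTUPLE command through the usual SIOCETHTOOL ioctl. A rough userspace sketch, assuming a kernel carrying this patch and a <linux/ethtool.h> that exports struct ethtool_rx_ntuple; the field values are placeholders and a real filter would also fill fs.h_u/fs.m_u:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static int install_ntuple(int fd, const char *ifname)
    {
            struct ethtool_rx_ntuple ntuple;
            struct ifreq ifr;

            memset(&ntuple, 0, sizeof(ntuple));
            ntuple.cmd = ETHTOOL_SRXNTUPLE;
            ntuple.fs.flow_type = TCP_V4_FLOW;
            ntuple.fs.action = 0;                  /* direct hits to queue 0 */

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&ntuple;
            return ioctl(fd, SIOCETHTOOL, &ifr);   /* fd: any AF_INET socket */
    }
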
 +static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
 +{
 +      struct ethtool_gstrings gstrings;
 +      const struct ethtool_ops *ops = dev->ethtool_ops;
 +      struct ethtool_rx_ntuple_flow_spec_container *fsc;
 +      u8 *data;
 +      char *p;
 +      int ret, i, num_strings = 0;
 +
 +      if (!ops->get_sset_count)
 +              return -EOPNOTSUPP;
 +
 +      if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
 +              return -EFAULT;
 +
 +      ret = ops->get_sset_count(dev, gstrings.string_set);
 +      if (ret < 0)
 +              return ret;
 +
 +      gstrings.len = ret;
 +
 +      data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
 +      if (!data)
 +              return -ENOMEM;
 +
 +      if (ops->get_rx_ntuple) {
 +              /* driver-specific filter grab */
 +              ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
 +              goto copy;
 +      }
 +
 +      /* default ethtool filter grab */
 +      i = 0;
 +      p = (char *)data;
 +      list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
 +              sprintf(p, "Filter %d:\n", i);
 +              p += ETH_GSTRING_LEN;
 +              num_strings++;
 +
 +              switch (fsc->fs.flow_type) {
 +              case TCP_V4_FLOW:
 +                      sprintf(p, "\tFlow Type: TCP\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case UDP_V4_FLOW:
 +                      sprintf(p, "\tFlow Type: UDP\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case SCTP_V4_FLOW:
 +                      sprintf(p, "\tFlow Type: SCTP\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case AH_ESP_V4_FLOW:
 +                      sprintf(p, "\tFlow Type: AH ESP\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case ESP_V4_FLOW:
 +                      sprintf(p, "\tFlow Type: ESP\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case IP_USER_FLOW:
 +                      sprintf(p, "\tFlow Type: Raw IP\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case IPV4_FLOW:
 +                      sprintf(p, "\tFlow Type: IPv4\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              default:
 +                      sprintf(p, "\tFlow Type: Unknown\n");
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      goto unknown_filter;
 +              }
 +
 +              /* now the rest of the filters */
 +              switch (fsc->fs.flow_type) {
 +              case TCP_V4_FLOW:
 +              case UDP_V4_FLOW:
 +              case SCTP_V4_FLOW:
 +                      sprintf(p, "\tSrc IP addr: 0x%x\n",
 +                              fsc->fs.h_u.tcp_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tSrc IP mask: 0x%x\n",
 +                              fsc->fs.m_u.tcp_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP addr: 0x%x\n",
 +                              fsc->fs.h_u.tcp_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP mask: 0x%x\n",
 +                              fsc->fs.m_u.tcp_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.tcp_ip4_spec.psrc,
 +                              fsc->fs.m_u.tcp_ip4_spec.psrc);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.tcp_ip4_spec.pdst,
 +                              fsc->fs.m_u.tcp_ip4_spec.pdst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tTOS: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.tcp_ip4_spec.tos,
 +                              fsc->fs.m_u.tcp_ip4_spec.tos);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case AH_ESP_V4_FLOW:
 +              case ESP_V4_FLOW:
 +                      sprintf(p, "\tSrc IP addr: 0x%x\n",
 +                              fsc->fs.h_u.ah_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tSrc IP mask: 0x%x\n",
 +                              fsc->fs.m_u.ah_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP addr: 0x%x\n",
 +                              fsc->fs.h_u.ah_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP mask: 0x%x\n",
 +                              fsc->fs.m_u.ah_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tSPI: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.ah_ip4_spec.spi,
 +                              fsc->fs.m_u.ah_ip4_spec.spi);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tTOS: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.ah_ip4_spec.tos,
 +                              fsc->fs.m_u.ah_ip4_spec.tos);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case IP_USER_FLOW:
 +                      sprintf(p, "\tSrc IP addr: 0x%x\n",
 +                              fsc->fs.h_u.raw_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tSrc IP mask: 0x%x\n",
 +                              fsc->fs.m_u.raw_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP addr: 0x%x\n",
 +                              fsc->fs.h_u.raw_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP mask: 0x%x\n",
 +                              fsc->fs.m_u.raw_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              case IPV4_FLOW:
 +                      sprintf(p, "\tSrc IP addr: 0x%x\n",
 +                              fsc->fs.h_u.usr_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tSrc IP mask: 0x%x\n",
 +                              fsc->fs.m_u.usr_ip4_spec.ip4src);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP addr: 0x%x\n",
 +                              fsc->fs.h_u.usr_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tDest IP mask: 0x%x\n",
 +                              fsc->fs.m_u.usr_ip4_spec.ip4dst);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
 +                              fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
 +                              fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tTOS: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.usr_ip4_spec.tos,
 +                              fsc->fs.m_u.usr_ip4_spec.tos);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.usr_ip4_spec.ip_ver,
 +                              fsc->fs.m_u.usr_ip4_spec.ip_ver);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
 +                              fsc->fs.h_u.usr_ip4_spec.proto,
 +                              fsc->fs.m_u.usr_ip4_spec.proto);
 +                      p += ETH_GSTRING_LEN;
 +                      num_strings++;
 +                      break;
 +              }
 +              sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
 +                      fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
 +              p += ETH_GSTRING_LEN;
 +              num_strings++;
 +              sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
 +              p += ETH_GSTRING_LEN;
 +              num_strings++;
 +              sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
 +              p += ETH_GSTRING_LEN;
 +              num_strings++;
 +              if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
 +                      sprintf(p, "\tAction: Drop\n");
 +              else
 +                      sprintf(p, "\tAction: Direct to queue %d\n",
 +                              fsc->fs.action);
 +              p += ETH_GSTRING_LEN;
 +              num_strings++;
 +unknown_filter:
 +              i++;
 +      }
 +copy:
 +      /* indicate to userspace how many strings we actually have */
 +      gstrings.len = num_strings;
 +      ret = -EFAULT;
 +      if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
 +              goto out;
 +      useraddr += sizeof(gstrings);
 +      if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
 +              goto out;
 +      ret = 0;
 +
 +out:
 +      kfree(data);
 +      return ret;
 +}
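
The report built above is an array of fixed-width records: each sprintf() writes into the start of an ETH_GSTRING_LEN slot and the cursor then advances a whole slot, so userspace can index string i at data + i * ETH_GSTRING_LEN no matter how long each line is. A standalone illustration (ETH_GSTRING_LEN is 32 in this era; names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define GSTRING_LEN 32

    static int pack_report(char *buf, int nslots)
    {
            char *p = buf;
            int n = 0;

            memset(buf, 0, (size_t)nslots * GSTRING_LEN);
            snprintf(p, GSTRING_LEN, "Filter %d:", 0);
            p += GSTRING_LEN; n++;          /* advance one whole slot */
            snprintf(p, GSTRING_LEN, "\tAction: Drop");
            p += GSTRING_LEN; n++;
            return n;                       /* slots actually used */
    }
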
 +
  static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
  {
        struct ethtool_regs regs;
@@@ -654,9 -313,6 +654,9 @@@ static int ethtool_reset(struct net_dev
        if (copy_from_user(&reset, useraddr, sizeof(reset)))
                return -EFAULT;
  
 +      /* Clear ethtool n-tuple list */
 +      ethtool_ntuple_flush(dev);
 +
        ret = dev->ethtool_ops->reset(dev, &reset.data);
        if (ret)
                return ret;
  
  static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
  {
 -      struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
 +      struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
  
        if (!dev->ethtool_ops->get_wol)
                return -EOPNOTSUPP;
@@@ -800,12 -456,9 +800,12 @@@ static int ethtool_set_eeprom(struct ne
        return ret;
  }
  
 -static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
 +/*
 + * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
 + */
 +static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
  {
 -      struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
 +      struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
  
        if (!dev->ethtool_ops->get_coalesce)
                return -EOPNOTSUPP;
        return 0;
  }
  
 -static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
 +/*
 + * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
 + */
 +static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
  {
        struct ethtool_coalesce coalesce;
  
  
  static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
  {
 -      struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
 +      struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
  
        if (!dev->ethtool_ops->get_ringparam)
                return -EOPNOTSUPP;
@@@ -1189,7 -839,7 +1189,7 @@@ static int ethtool_get_perm_addr(struc
  static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
                             u32 cmd, u32 (*actor)(struct net_device *))
  {
 -      struct ethtool_value edata = { cmd };
 +      struct ethtool_value edata = { .cmd = cmd };
  
        if (!actor)
                return -EOPNOTSUPP;
@@@ -1230,10 -880,7 +1230,10 @@@ static int ethtool_set_value(struct net
        return actor(dev, edata.data);
  }
  
 -static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
 +/*
 + * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
 + */
 +static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
  {
        struct ethtool_flash efl;
  
@@@ -1280,6 -927,7 +1280,7 @@@ int dev_ethtool(struct net *net, struc
        case ETHTOOL_GPERMADDR:
        case ETHTOOL_GUFO:
        case ETHTOOL_GGSO:
+       case ETHTOOL_GGRO:
        case ETHTOOL_GFLAGS:
        case ETHTOOL_GPFLAGS:
        case ETHTOOL_GRXFH:
        case ETHTOOL_RESET:
                rc = ethtool_reset(dev, useraddr);
                break;
 +      case ETHTOOL_SRXNTUPLE:
 +              rc = ethtool_set_rx_ntuple(dev, useraddr);
 +              break;
 +      case ETHTOOL_GRXNTUPLE:
 +              rc = ethtool_get_rx_ntuple(dev, useraddr);
 +              break;
        default:
                rc = -EOPNOTSUPP;
        }
diff --combined net/ipv4/ipcomp.c
index b55a0c3df82f6daeb0cd811e2e9f84b4193dd26e,544ce0876f12dcd78cfcf0d777e1b78bab5b4acf..83ed71500898bd0eec4b925e54eb42ebc765e27b
@@@ -25,7 -25,6 +25,7 @@@
  
  static void ipcomp4_err(struct sk_buff *skb, u32 info)
  {
 +      struct net *net = dev_net(skb->dev);
        __be32 spi;
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@@ -36,7 -35,7 +36,7 @@@
                return;
  
        spi = htonl(ntohs(ipch->cpi));
 -      x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr,
 +      x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr,
                              spi, IPPROTO_COMP, AF_INET);
        if (!x)
                return;
  /* We always hold one tunnel user reference to indicate a tunnel */
  static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
  {
 +      struct net *net = xs_net(x);
        struct xfrm_state *t;
  
 -      t = xfrm_state_alloc(&init_net);
 +      t = xfrm_state_alloc(net);
        if (t == NULL)
                goto out;
  
@@@ -84,11 -82,10 +84,11 @@@ error
   */
  static int ipcomp_tunnel_attach(struct xfrm_state *x)
  {
 +      struct net *net = xs_net(x);
        int err = 0;
        struct xfrm_state *t;
  
 -      t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4,
 +      t = xfrm_state_lookup(net, (xfrm_address_t *)&x->id.daddr.a4,
                              x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
        if (!t) {
                t = ipcomp_tunnel_create(x);
@@@ -127,16 -124,12 +127,12 @@@ static int ipcomp4_init_state(struct xf
        if (x->props.mode == XFRM_MODE_TUNNEL) {
                err = ipcomp_tunnel_attach(x);
                if (err)
-                       goto error_tunnel;
+                       goto out;
        }
  
        err = 0;
  out:
        return err;
- error_tunnel:
-       ipcomp_destroy(x);
-       goto out;
  }
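
The error_tunnel label, which called ipcomp_destroy() before returning, is dropped here (and in the ipv6 copy below); on failure the function now simply returns, presumably leaving teardown of the half-initialized state to the caller so it is not destroyed twice. A sketch of the resulting single-exit pattern, with hypothetical names:

    static int attach_tunnel(struct xfrm_state *x);  /* hypothetical helper */

    static int example_init_state(struct xfrm_state *x)
    {
            int err = -EINVAL;

            if (!x->calg)
                    goto out;
            err = attach_tunnel(x);
            if (err)
                    goto out;        /* no local destroy: caller cleans up */
            err = 0;
    out:
            return err;
    }
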
  
  static const struct xfrm_type ipcomp_type = {
diff --combined net/ipv6/ipcomp6.c
index a9fbb151bb7998c507a12f4f052a63465f81059a,002e6eef91204ea9f21e49a36660fc0e3952fd81..bb42f39c1db8804b4e2cc593910fb5d87de69fb4
@@@ -53,7 -53,6 +53,7 @@@
  static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                                u8 type, u8 code, int offset, __be32 info)
  {
 +      struct net *net = dev_net(skb->dev);
        __be32 spi;
        struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
        struct ip_comp_hdr *ipcomph =
@@@ -64,7 -63,7 +64,7 @@@
                return;
  
        spi = htonl(ntohs(ipcomph->cpi));
 -      x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
 +      x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
        if (!x)
                return;
  
  
  static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
  {
 +      struct net *net = xs_net(x);
        struct xfrm_state *t = NULL;
  
 -      t = xfrm_state_alloc(&init_net);
 +      t = xfrm_state_alloc(net);
        if (!t)
                goto out;
  
        t->id.proto = IPPROTO_IPV6;
 -      t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr);
 +      t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
        if (!t->id.spi)
                goto error;
  
@@@ -110,14 -108,13 +110,14 @@@ error
  
  static int ipcomp6_tunnel_attach(struct xfrm_state *x)
  {
 +      struct net *net = xs_net(x);
        int err = 0;
        struct xfrm_state *t = NULL;
        __be32 spi;
  
 -      spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr);
 +      spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr);
        if (spi)
 -              t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr,
 +              t = xfrm_state_lookup(net, (xfrm_address_t *)&x->id.daddr,
                                              spi, IPPROTO_IPV6, AF_INET6);
        if (!t) {
                t = ipcomp6_tunnel_create(x);
@@@ -157,16 -154,12 +157,12 @@@ static int ipcomp6_init_state(struct xf
        if (x->props.mode == XFRM_MODE_TUNNEL) {
                err = ipcomp6_tunnel_attach(x);
                if (err)
-                       goto error_tunnel;
+                       goto out;
        }
  
        err = 0;
  out:
        return err;
- error_tunnel:
-       ipcomp_destroy(x);
-       goto out;
  }
  
  static const struct xfrm_type ipcomp6_type =
diff --combined net/xfrm/xfrm_state.c
index 96f2088e744848ed42b173f20ef4f91db5b951e0,f445ea1c5f52b46392a2860c4381d626da893ff3..9fa3322b2a7dda786922f472b6a49a369c40f1ac
@@@ -603,14 -603,13 +603,14 @@@ xfrm_state_flush_secctx_check(struct ne
  
  int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
  {
 -      int i, err = 0;
 +      int i, err = 0, cnt = 0;
  
        spin_lock_bh(&xfrm_state_lock);
        err = xfrm_state_flush_secctx_check(net, proto, audit_info);
        if (err)
                goto out;
  
 +      err = -ESRCH;
        for (i = 0; i <= net->xfrm.state_hmask; i++) {
                struct hlist_node *entry;
                struct xfrm_state *x;
@@@ -627,16 -626,13 +627,16 @@@ restart
                                                        audit_info->sessionid,
                                                        audit_info->secid);
                                xfrm_state_put(x);
 +                              if (!err)
 +                                      cnt++;
  
                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
 -      err = 0;
 +      if (cnt)
 +              err = 0;
  
  out:
        spin_unlock_bh(&xfrm_state_lock);
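
xfrm_state_flush() now starts from -ESRCH and clears the error only once at least one state has actually been deleted, instead of unconditionally returning 0. The same counting idiom in a hedged sketch (kernel context with <linux/list.h>, <linux/slab.h>, <linux/errno.h> assumed; the types are hypothetical):

    struct flush_item {
            struct list_head list;
            u8 proto;
    };

    static int flush_matching(struct list_head *head, u8 proto)
    {
            struct flush_item *it, *tmp;
            int err = -ESRCH, cnt = 0;

            list_for_each_entry_safe(it, tmp, head, list) {
                    if (it->proto != proto)
                            continue;
                    list_del(&it->list);
                    kfree(it);
                    cnt++;
            }
            if (cnt)
                    err = 0;  /* succeed only if something was flushed */
            return err;
    }
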
@@@ -1106,7 -1102,7 +1106,7 @@@ static struct xfrm_state *xfrm_state_cl
        int err = -ENOMEM;
        struct xfrm_state *x = xfrm_state_alloc(net);
        if (!x)
-               goto error;
+               goto out;
  
        memcpy(&x->id, &orig->id, sizeof(x->id));
        memcpy(&x->sel, &orig->sel, sizeof(x->sel));
        return x;
  
   error:
+       xfrm_state_put(x);
+ out:
        if (errp)
                *errp = err;
-       if (x) {
-               kfree(x->aalg);
-               kfree(x->ealg);
-               kfree(x->calg);
-               kfree(x->encap);
-               kfree(x->coaddr);
-       }
-       kfree(x);
        return NULL;
  }
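
The clone error path above stops hand-freeing individual members (aalg, ealg, calg, ...) and instead drops the half-built state with xfrm_state_put(), letting the normal destructor reclaim whatever was populated, so a future member cannot be leaked by omission. A hedged sketch of the pattern; every name here is hypothetical:

    struct obj;
    struct obj *obj_alloc(void);    /* returns refcount == 1, or NULL */
    int copy_parts(struct obj *dst, const struct obj *src);
    void obj_put(struct obj *o);    /* destructor runs at refcount 0 */

    struct obj *obj_clone(const struct obj *orig, int *errp)
    {
            int err = -ENOMEM;
            struct obj *o = obj_alloc();

            if (!o)
                    goto out;
            err = copy_parts(o, orig);  /* may partially populate o */
            if (err)
                    goto error;
            return o;
    error:
            obj_put(o);     /* destructor reclaims whatever was populated */
    out:
            if (errp)
                    *errp = err;
            return NULL;
    }
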
  
@@@ -1462,12 -1452,12 +1456,12 @@@ EXPORT_SYMBOL(xfrm_find_acq_byseq)
  u32 xfrm_get_acqseq(void)
  {
        u32 res;
 -      static u32 acqseq;
 -      static DEFINE_SPINLOCK(acqseq_lock);
 +      static atomic_t acqseq;
 +
 +      do {
 +              res = atomic_inc_return(&acqseq);
 +      } while (!res);
  
 -      spin_lock_bh(&acqseq_lock);
 -      res = (++acqseq ? : ++acqseq);
 -      spin_unlock_bh(&acqseq_lock);
        return res;
  }
  EXPORT_SYMBOL(xfrm_get_acqseq);
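
xfrm_get_acqseq() trades its spinlock for atomic_inc_return(), looping only to skip 0, which callers treat as "no sequence". The same lockless idiom in portable C11, as a sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t acqseq;

    uint32_t get_acqseq(void)
    {
            uint32_t res;

            do {
                    res = atomic_fetch_add(&acqseq, 1) + 1; /* inc-and-return */
            } while (res == 0);                             /* 0 is reserved */
            return res;
    }
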