#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.44-k2"
+#define DRV_VERSION "2.0.62-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
};
#endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+ "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
{
u32 rxctrl;
int cpu = get_cpu();
- int q = rx_ring - adapter->rx_ring;
+ int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
{
u32 txctrl;
int cpu = get_cpu();
- int q = tx_ring - adapter->tx_ring;
+ int q = tx_ring->reg_idx;
struct ixgbe_hw *hw = &adapter->hw;
if (tx_ring->cpu != cpu) {
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].cpu = -1;
- ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+ adapter->tx_ring[i]->cpu = -1;
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].cpu = -1;
- ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+ adapter->rx_ring[i]->cpu = -1;
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
}
}
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
+ j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
+ j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ if (adapter->num_vfs)
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+ else
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
if (hw->mac.type == ixgbe_mac_82598EB)
ixgbe_check_fan_failure(adapter, eicr);
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- &adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
long r_idx;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, rx_ring);
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, ring);
budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, ring);
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
long r_idx;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, tx_ring);
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
- struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ if (adapter->num_vfs)
+ mask |= IXGBE_EIMS_MAILBOX;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
+
+ if (adapter->num_vfs > 32) {
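+ /* one selection bit per VF beyond the first 32; e.g.
+ * 40 VFs yields (1 << 8) - 1 = 0xff */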
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
}
/**
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0].total_packets = 0;
- adapter->tx_ring[0].total_bytes = 0;
- adapter->rx_ring[0].total_packets = 0;
- adapter->rx_ring[0].total_bytes = 0;
+ adapter->tx_ring[0]->total_packets = 0;
+ adapter->tx_ring[0]->total_bytes = 0;
+ adapter->rx_ring[0]->total_packets = 0;
+ adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ if (adapter->num_vfs > 32)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = &adapter->tx_ring[i];
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
j = ring->reg_idx;
tdba = ring->dma;
tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_TDH(j);
- adapter->tx_ring[i].tail = IXGBE_TDT(j);
+ adapter->tx_ring[i]->head = IXGBE_TDH(j);
+ adapter->tx_ring[i]->tail = IXGBE_TDT(j);
/*
* Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
if (hw->mac.type == ixgbe_mac_82599EB) {
u32 rttdcs;
+ u32 mask;
/* disable the arbiter while setting MTQC */
rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
rttdcs |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- /* We enable 8 traffic classes, DCB only */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
- IXGBE_MTQC_8TC_8TQ));
- else
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
+ /* re-enable the arbiter */
rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
#ifdef CONFIG_IXGBE_DCB
| IXGBE_FLAG_DCB_ENABLED
#endif
+ | IXGBE_FLAG_SRIOV_ENABLED
);
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
mrqc = IXGBE_MRQC_RSSEN;
break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
#ifdef CONFIG_IXGBE_DCB
case (IXGBE_FLAG_DCB_ENABLED):
mrqc = IXGBE_MRQC_RT8TCEN;
u32 rscctrl;
int rx_buf_len;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
j = rx_ring->reg_idx;
rx_buf_len = rx_ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
int rx_buf_len;
/* Decide whether to use packet split mode or not */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (!adapter->num_vfs)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
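+ /* PSRTYPE is per pool; the PF's default pool sits
+ * just past the VFs, hence index num_vfs */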
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PSRTYPE(adapter->num_vfs),
+ psrtype);
}
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
#endif
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
+ rx_ring = adapter->rx_ring[i];
rdba = rx_ring->dma;
j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+ | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs <<
+ IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = adapter->num_vfs / 32;
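+ /* the PF owns the pool right after the VFs, so its
+ * enable bit is vf_shift within VFRE/VFTE[reg_offset] */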
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ ixgbe_set_vmolr(hw, adapter->num_vfs);
+ }
+
/* Program MRQC for the distribution of queues */
mrqc = ixgbe_setup_mrqc(adapter);
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if (adapter->num_vfs) {
+ u32 reg;
+
+ /* Map PF MAC address in RAR Entry 0 to first pool
+ * following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /* Set up VF register offsets for selected VT Mode, i.e.
+ * 64 VFs for SR-IOV */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ reg |= IXGBE_GCR_EXT_SRIOV;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+ }
+
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
ixgbe_irq_enable(adapter);
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 ctrl;
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
* responsible for configuring the hardware for proper unicast, multicast and
* promiscuous mode.
**/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
+ hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
+ if (adapter->num_vfs)
+ ixgbe_restore_vf_multicasts(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* PThresh workaround for Tx hang with DFP enabled. */
txdctl |= 32;
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
#endif /* IXGBE_FCOE */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].atr_sample_rate =
+ adapter->tx_ring[i]->atr_sample_rate =
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
- (adapter->rx_ring[i].count - 1));
+ ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+ (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
int rxr)
{
- int j = adapter->rx_ring[rxr].reg_idx;
+ int j = adapter->rx_ring[rxr]->reg_idx;
int k;
for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
"not set within the polling period\n", rxr);
}
- ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr]->count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
u32 txdctl, rxdctl, mhadd;
u32 dmatxctl;
u32 gpie;
+ u32 ctrl_ext;
ixgbe_get_hw_control(adapter);
/* MSI only */
gpie = 0;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
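+ /* select 64-pool VT mode to match the IXGBE_MTQC_64VF
+ * transmit layout programmed earlier */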
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* enable WTHRESH=8 descriptors, to encourage burst writeback */
txdctl |= (8 << 16);
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ int wait_loop = 10;
+ /* poll for Tx Enable ready */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+ } while (--wait_loop &&
+ !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ DPRINTK(DRV, ERR, "Could not enable "
+ "Tx Queue %d\n", j);
+ }
}
for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
/* enable PTHRESH=32 descriptors (half the internal cache)
* and HTHRESH=0 descriptors (to minimize latency on fetch),
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
/* enable transmits */
netif_tx_start_all_queues(netdev);
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
return 0;
}
}
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
}
/**
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
+ /* disable receive for all VFs and wait one second */
+ if (adapter->num_vfs) {
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+ }
+
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
netif_carrier_off(netdev);
+ /* clear n-tuple filters that are cached */
+ ethtool_ntuple_flush(netdev);
+
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
}
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
- ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
}
#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
/*
* ixgbe_set_num_queues: Allocate queues for device, feature dependant
* @adapter: board private structure to initialize
**/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
+
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
} else {
ret = false;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* the number of queues is assumed to be symmetric */
for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i].reg_idx = i << 3;
- adapter->tx_ring[i].reg_idx = i << 2;
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
}
ret = true;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
- adapter->tx_ring[i].reg_idx = i << 5;
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < 5; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 2) << 4);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 8) << 3);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
ret = true;
*
* Rx TC0-TC3 are offset by 32 queues each
*/
- adapter->tx_ring[0].reg_idx = 0;
- adapter->tx_ring[1].reg_idx = 64;
- adapter->tx_ring[2].reg_idx = 96;
- adapter->tx_ring[3].reg_idx = 112;
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i].reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 5;
ret = true;
} else {
((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
ixgbe_cache_ring_dcb(adapter);
/* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
ret = true;
}
}
#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
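+ /* assumed layout: in 64-pool mode each pool maps to two
+ * hardware queues, so the PF's rings start at num_vfs * 2 */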
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ return (adapter->num_vfs != 0);
+}
+
/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
- adapter->rx_ring[0].reg_idx = 0;
- adapter->tx_ring[0].reg_idx = 0;
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
-
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ int orig_node = adapter->node;
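+ /* with no preferred node (-1), spread the ring allocations
+ * round-robin across the online NUMA nodes */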
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_tx_ring_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->tx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_rx_ring_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->rx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
ixgbe_cache_ring_register(adapter);
return 0;
err_rx_ring_allocation:
- kfree(adapter->tx_ring);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ kfree(adapter->tx_ring[i]);
err_tx_ring_allocation:
return -ENOMEM;
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_set_num_queues(adapter);
err = pci_enable_msi(adapter->pdev);
}
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
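+ /* prefer a node-local allocation, fall back to any node */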
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
int j;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ if (dev->features & NETIF_F_NTUPLE) {
+ /* Flow Director perfect filter enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ } else {
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ }
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->atr_sample_rate = 20;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
/* enable rx csum by default */
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
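+ /* try the ring's NUMA node first; plain vmalloc() is the fallback */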
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
memset(tx_ring->tx_buffer_info, 0, size);
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
}
/**
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
}
/**
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i].rsc_count;
- rsc_flush += adapter->rx_ring[i].rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i]->restart_queue;
adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters \n");
netif_tx_start_all_queues(adapter->netdev);
}
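+/* serializes watchdog runs and the link state they sample; replaces the
+ * old IXGBE_FLAG_IN_WATCHDOG_TASK flag */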
+static DEFINE_MUTEX(ixgbe_watchdog_lock);
+
/**
* ixgbe_watchdog_task - worker thread to bring link up
* @work: pointer to work_struct containing our data
watchdog_task);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = adapter->link_speed;
- bool link_up = adapter->link_up;
+ u32 link_speed;
+ bool link_up;
int i;
struct ixgbe_ring *tx_ring;
int some_tx_pending = 0;
- adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_lock(&ixgbe_watchdog_lock);
+
+ link_up = adapter->link_up;
+ link_speed = adapter->link_speed;
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (!netif_carrier_ok(netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = &adapter->tx_ring[i];
+ tx_ring = adapter->tx_ring[i];
if (tx_ring->next_to_use != tx_ring->next_to_clean) {
some_tx_pending = 1;
break;
}
ixgbe_update_stats(adapter);
- adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_unlock(&ixgbe_watchdog_lock);
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
}
}
- tx_ring = &adapter->tx_ring[skb->queue_mapping];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
return 0;
}
#endif /* IXGBE_FCOE */
};
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* allocation failed; back out of SR-IOV */
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for VF "
+ "Data Storage - SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
if (err)
goto err_sw_init;
+ /* Make it possible for the adapter to be woken up via WOL */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
/*
* If there is a fan on this device and it has failed log the
* failure.
goto err_sw_init;
}
+ ixgbe_probe_vf(adapter, ii);
+
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
ixgbe_setup_dca(adapter);
}
#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+ adapter->num_vfs);
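+ /* the 0x10000000 flag appears to mark this as a VF-enable
+ * event for ixgbe_vf_configuration() (assumption) */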
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->sfp_task);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
* NETIF_F_xxx values in include/linux/netdevice.h
*/
static const u32 flags_dup_features =
- ETH_FLAG_LRO;
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
u32 ethtool_op_get_flags(struct net_device *dev)
{
int ethtool_op_set_flags(struct net_device *dev, u32 data)
{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
if (data & ETH_FLAG_LRO)
dev->features |= NETIF_F_LRO;
else
dev->features &= ~NETIF_F_LRO;
+ if (data & ETH_FLAG_NTUPLE) {
+ if (!ops->set_rx_ntuple)
+ return -EOPNOTSUPP;
+ dev->features |= NETIF_F_NTUPLE;
+ } else {
+ /* safe to clear regardless */
+ dev->features &= ~NETIF_F_NTUPLE;
+ }
+
return 0;
}
+void ethtool_ntuple_flush(struct net_device *dev)
+{
+ struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
+
+ list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
+ list_del(&fsc->list);
+ kfree(fsc);
+ }
+ dev->ethtool_ntuple_list.count = 0;
+}
+EXPORT_SYMBOL(ethtool_ntuple_flush);
+
/* Handlers for each ethtool command */
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
int err;
if (!dev->ethtool_ops->get_settings)
return dev->ethtool_ops->set_settings(dev, &cmd);
}
-static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
{
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
return 0;
}
-static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc cmd;
return dev->ethtool_ops->set_rxnfc(dev, &cmd);
}
-static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc info;
const struct ethtool_ops *ops = dev->ethtool_ops;
return ret;
}
+static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
+ struct ethtool_rx_ntuple_flow_spec *spec,
+ struct ethtool_rx_ntuple_flow_spec_container *fsc)
+{
+
+ /* don't add filters forever */
+ if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
+ /* free the container */
+ kfree(fsc);
+ return;
+ }
+
+ /* Copy the whole filter over */
+ fsc->fs.flow_type = spec->flow_type;
+ memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
+ memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
+
+ fsc->fs.vlan_tag = spec->vlan_tag;
+ fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
+ fsc->fs.data = spec->data;
+ fsc->fs.data_mask = spec->data_mask;
+ fsc->fs.action = spec->action;
+
+ /* add to the list */
+ list_add_tail_rcu(&fsc->list, &list->list);
+ list->count++;
+}
+
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rx_ntuple cmd;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
+ int ret;
+
+ if (!(dev->features & NETIF_F_NTUPLE))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ /*
+ * Cache filter in dev struct for GET operation only if
+ * the underlying driver doesn't have its own GET operation, and
+ * only if the filter was added successfully. First make sure we
+ * can allocate the filter, then continue if successful.
+ */
+ if (!ops->get_rx_ntuple) {
+ fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
+ if (!fsc)
+ return -ENOMEM;
+ }
+
+ ret = ops->set_rx_ntuple(dev, &cmd);
+ if (ret) {
+ kfree(fsc);
+ return ret;
+ }
+
+ if (!ops->get_rx_ntuple)
+ __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
+
+ return ret;
+}
+
+static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rx_ntuple_flow_spec_container *fsc;
+ u8 *data;
+ char *p;
+ int ret, i, num_strings = 0;
+
+ if (!ops->get_sset_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ ret = ops->get_sset_count(dev, gstrings.string_set);
+ if (ret < 0)
+ return ret;
+
+ gstrings.len = ret;
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ if (ops->get_rx_ntuple) {
+ /* driver-specific filter grab */
+ ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
+ goto copy;
+ }
+
+ /* default ethtool filter grab */
+ i = 0;
+ p = (char *)data;
+ list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
+ sprintf(p, "Filter %d:\n", i);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+
+ switch (fsc->fs.flow_type) {
+ case TCP_V4_FLOW:
+ sprintf(p, "\tFlow Type: TCP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case UDP_V4_FLOW:
+ sprintf(p, "\tFlow Type: UDP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case SCTP_V4_FLOW:
+ sprintf(p, "\tFlow Type: SCTP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case AH_ESP_V4_FLOW:
+ sprintf(p, "\tFlow Type: AH ESP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case ESP_V4_FLOW:
+ sprintf(p, "\tFlow Type: ESP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IP_USER_FLOW:
+ sprintf(p, "\tFlow Type: Raw IP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IPV4_FLOW:
+ sprintf(p, "\tFlow Type: IPv4\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ default:
+ sprintf(p, "\tFlow Type: Unknown\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ goto unknown_filter;
+ }
+
+ /* now the rest of the filters */
+ switch (fsc->fs.flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.tcp_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.tcp_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.psrc,
+ fsc->fs.m_u.tcp_ip4_spec.psrc);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.pdst,
+ fsc->fs.m_u.tcp_ip4_spec.pdst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.tos,
+ fsc->fs.m_u.tcp_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case AH_ESP_V4_FLOW:
+ case ESP_V4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.ah_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.ah_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSPI: %d, mask: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.spi,
+ fsc->fs.m_u.ah_ip4_spec.spi);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.tos,
+ fsc->fs.m_u.ah_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IP_USER_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.raw_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.raw_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.raw_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.raw_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IPV4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.usr_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.usr_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
+ fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.tos,
+ fsc->fs.m_u.usr_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip_ver,
+ fsc->fs.m_u.usr_ip4_spec.ip_ver);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.proto,
+ fsc->fs.m_u.usr_ip4_spec.proto);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ }
+ sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
+ fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ sprintf(p, "\tAction: Drop\n");
+ else
+ sprintf(p, "\tAction: Direct to queue %d\n",
+ fsc->fs.action);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+unknown_filter:
+ i++;
+ }
+copy:
+ /* indicate to userspace how many strings we actually have */
+ gstrings.len = num_strings;
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
{
struct ethtool_regs regs;
if (copy_from_user(&reset, useraddr, sizeof(reset)))
return -EFAULT;
+ /* Clear ethtool n-tuple list */
+ ethtool_ntuple_flush(dev);
+
ret = dev->ethtool_ops->reset(dev, &reset.data);
if (ret)
return ret;
static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
if (!dev->ethtool_ops->get_wol)
return -EOPNOTSUPP;
return ret;
}
-static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+ struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
if (!dev->ethtool_ops->get_coalesce)
return -EOPNOTSUPP;
return 0;
}
-static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
{
struct ethtool_coalesce coalesce;
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+ struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
if (!dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
u32 cmd, u32 (*actor)(struct net_device *))
{
- struct ethtool_value edata = { cmd };
+ struct ethtool_value edata = { .cmd = cmd };
if (!actor)
return -EOPNOTSUPP;
return actor(dev, edata.data);
}
-static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
{
struct ethtool_flash efl;
case ETHTOOL_GPERMADDR:
case ETHTOOL_GUFO:
case ETHTOOL_GGSO:
+ case ETHTOOL_GGRO:
case ETHTOOL_GFLAGS:
case ETHTOOL_GPFLAGS:
case ETHTOOL_GRXFH:
case ETHTOOL_RESET:
rc = ethtool_reset(dev, useraddr);
break;
+ case ETHTOOL_SRXNTUPLE:
+ rc = ethtool_set_rx_ntuple(dev, useraddr);
+ break;
+ case ETHTOOL_GRXNTUPLE:
+ rc = ethtool_get_rx_ntuple(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}