F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
S: Maintained
S: Supported
F: drivers/net/wireless/ath/ath9k/
-ATHEROS AR9170 WIRELESS DRIVER
-W: http://wireless.kernel.org/en/users/Drivers/ar9170
-S: Obsolete
-F: drivers/net/wireless/ath/ar9170/
-
CARL9170 LINUX COMMUNITY WIRELESS DRIVER
F: drivers/net/wimax/i2400m/
F: include/linux/wimax/i2400m.h
+INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
+S: Supported
+F: drivers/net/wireless/iwlegacy/
+
INTEL WIRELESS WIFI LINK (iwlwifi)
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
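+/* ioc_sync_start: one-time sync state check on first-time IOC init,
+ * done before ioc_sync_join (see the SEMLOCKED handling below)
+ */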
+ #define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
- u32 pgnum, pgoff;
+ u32 pgnum;
u32 loff = 0;
int i;
u32 *fwsig = (u32 *) fwhdr;
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
- pgoff = bfa_ioc_smem_pgoff(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
* execution context (driver/bios) must match.
*/
static bool
- bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
if (fwhdr.signature != drv_fwhdr->signature)
return false;
- if (fwhdr.exec != drv_fwhdr->exec)
+ if (swab32(fwhdr.param) != boot_env)
return false;
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
{
enum bfi_ioc_state ioc_fwstate;
bool fwvalid;
+ u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
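+ /* host drivers always boot the IOC firmware in the OS loader environment */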
+ boot_env = BFI_BOOT_LOADER_OS;
+
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
- false : bfa_ioc_fwver_valid(ioc);
+ false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
return;
}
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}
void
*/
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
- u32 boot_param)
+ u32 boot_env)
{
u32 *fwimg;
- u32 pgnum, pgoff;
+ u32 pgnum;
u32 loff = 0;
u32 chunkno = 0;
u32 i;
fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
- pgoff = bfa_ioc_smem_pgoff(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
/*
* Set boot type and boot param at the end.
*/
- writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+ writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_TYPE_OFF)));
- writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
- + (BFI_BOOT_PARAM_OFF)));
+ writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_BOOT_LOADER_OFF)));
}
static void
* as the entry vector.
*/
static void
- bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
{
void __iomem *rb;
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
- if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
} else {
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_param);
+ bfa_ioc_download_fw(ioc, boot_type, boot_env);
/**
* Enable interrupts just before starting LPU
skb_put(skb, ntohs(cmpl->length));
if (likely
- (bnad->rx_csum &&
+ ((bnad->netdev->features & NETIF_F_RXCSUM) &&
(((flags & BNA_CQ_EF_IPV4) &&
(flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
(flags & BNA_CQ_EF_IPV6)) &&
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
- rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
{
struct net_device *netdev = bnad->netdev;
- netdev->features |= NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
- netdev->features |= NETIF_F_GRO;
- pr_warn("bna: GRO enabled, using kernel stack GRO\n");
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |=
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
-
- netdev->vlan_features = netdev->features;
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
bnad->txq_depth = BNAD_TXQ_DEPTH;
bnad->rxq_depth = BNAD_RXQ_DEPTH;
- bnad->rx_csum = true;
bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
/* Dual Media boards present all available port types */
cmd->supported = bp->port.supported[cfg_idx] |
(bp->port.supported[cfg_idx ^ 1] &
if ((bp->state == BNX2X_STATE_OPEN) &&
!(bp->flags & MF_FUNC_DIS) &&
(bp->link_vars.link_up)) {
- cmd->speed = bp->link_vars.line_speed;
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
cmd->duplex = bp->link_vars.duplex;
} else {
-
- cmd->speed = bp->link_params.req_line_speed[cfg_idx];
+ ethtool_cmd_speed_set(
+ cmd, bp->link_params.req_line_speed[cfg_idx]);
cmd->duplex = bp->link_params.req_duplex[cfg_idx];
}
if (IS_MF(bp))
- cmd->speed = bnx2x_get_mf_speed(bp);
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
cmd->maxrxpkt = 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
- DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
+ DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n"
DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
return 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
- " supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
" duplex %d port %d phy_address %d transceiver %d\n"
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
- cmd->speed_hi,
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
- speed = cmd->speed;
- speed |= (cmd->speed_hi << 16);
+ speed = ethtool_cmd_speed(cmd);
if (IS_MF_SI(bp)) {
u32 part;
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed);
+ DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
return -EINVAL;
}
return 0;
}
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int changed = 0;
- int rc = 0;
-
- if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
- return -EAGAIN;
- }
-
- if (!(data & ETH_FLAG_RXVLAN))
- return -EINVAL;
-
- if ((data & ETH_FLAG_LRO) && bp->rx_csum && bp->disable_tpa)
- return -EINVAL;
-
- rc = ethtool_op_set_flags(dev, data, ETH_FLAG_LRO | ETH_FLAG_RXVLAN |
- ETH_FLAG_TXVLAN | ETH_FLAG_RXHASH);
- if (rc)
- return rc;
-
- /* TPA requires Rx CSUM offloading */
- if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
- if (!(bp->flags & TPA_ENABLE_FLAG)) {
- bp->flags |= TPA_ENABLE_FLAG;
- changed = 1;
- }
- } else if (bp->flags & TPA_ENABLE_FLAG) {
- dev->features &= ~NETIF_F_LRO;
- bp->flags &= ~TPA_ENABLE_FLAG;
- changed = 1;
- }
-
- if (changed && netif_running(dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
-
- return rc;
-}
-
-static u32 bnx2x_get_rx_csum(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- return bp->rx_csum;
-}
-
-static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int rc = 0;
-
- if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
- return -EAGAIN;
- }
-
- bp->rx_csum = data;
-
- /* Disable TPA, when Rx CSUM is disabled. Otherwise all
- TPA'ed packets will be discarded due to wrong TCP CSUM */
- if (!data) {
- u32 flags = ethtool_op_get_flags(dev);
-
- rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
- }
-
- return rc;
-}
-
-static int bnx2x_set_tso(struct net_device *dev, u32 data)
-{
- if (data) {
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
- dev->features |= NETIF_F_TSO6;
- } else {
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
- dev->features &= ~NETIF_F_TSO6;
- }
-
- return 0;
-}
-
static const struct {
char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
}
}
-static int bnx2x_phys_id(struct net_device *dev, u32 data)
+static int bnx2x_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct bnx2x *bp = netdev_priv(dev);
- int i;
if (!netif_running(dev))
- return 0;
+ return -EAGAIN;
if (!bp->port.pmf)
- return 0;
+ return -EOPNOTSUPP;
- if (data == 0)
- data = 2;
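+ /* the ethtool core now drives the blink loop itself and calls back
+ * with ON/OFF/INACTIVE states instead of passing a repeat count
+ */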
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
- for (i = 0; i < (data * 2); i++) {
- if ((i % 2) == 0)
- bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_ON, SPEED_1000);
- else
- bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_FRONT_PANEL_OFF, 0);
+ case ETHTOOL_ID_ON:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OPER, SPEED_1000);
++ LED_MODE_ON, SPEED_1000);
+ break;
- msleep_interruptible(500);
- if (signal_pending(current))
- break;
- }
+ case ETHTOOL_ID_OFF:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OFF, 0);
++ LED_MODE_FRONT_PANEL_OFF, 0);
+
+ break;
- bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OPER, bp->link_vars.line_speed);
+ case ETHTOOL_ID_INACTIVE:
- if (bp->link_vars.link_up)
- bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OPER,
- bp->link_vars.line_speed);
++ bnx2x_set_led(&bp->link_params, &bp->link_vars,
++ LED_MODE_OPER,
++ bp->link_vars.line_speed);
+ }
return 0;
}
.set_ringparam = bnx2x_set_ringparam,
.get_pauseparam = bnx2x_get_pauseparam,
.set_pauseparam = bnx2x_set_pauseparam,
- .get_rx_csum = bnx2x_get_rx_csum,
- .set_rx_csum = bnx2x_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .set_flags = bnx2x_set_flags,
- .get_flags = ethtool_op_get_flags,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = bnx2x_set_tso,
.self_test = bnx2x_self_test,
.get_sset_count = bnx2x_get_sset_count,
.get_strings = bnx2x_get_strings,
- .phys_id = bnx2x_phys_id,
+ .set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
#include <linux/io.h>
#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
#include "qlcnic_hdr.h"
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 15
-#define QLCNIC_LINUX_VERSIONID "5.0.15"
+#define _QLCNIC_LINUX_SUBVERSION 16
+#define QLCNIC_LINUX_VERSIONID "5.0.16"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
#define TX_IP_PKT 0x04
#define TX_TCP_LSO 0x05
#define TX_TCP_LSO6 0x06
-#define TX_IPSEC 0x07
-#define TX_IPSEC_CMD 0x0a
#define TX_TCPV6_PKT 0x0b
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
+ #define QLCNIC_MAX_FRAGS_PER_TX 14
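+/* non-TSO skbs above this limit have the excess frags pulled into linear data */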
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
__le16 reserved;
__le32 buffer_length; /* allocated buffer length (usually 2K) */
__le64 addr_buffer;
-};
+} __packed;
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD 0x03
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
#define QLCNIC_FW_IMAGE_REGION 0x74
+#define QLCNIC_BOOTLD_REGION 0X72
struct qlcnic_flt_header {
u16 version;
u16 len;
u8 reserved1;
u32 size;
u32 start_addr;
- u32 end_add;
+ u32 end_addr;
};
/* Magic number to let user know flash is programmed */
u64 length;
};
-struct qlcnic_recv_crb {
- u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
- u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
- u32 sw_int_mask[NUM_STS_DESC_RINGS];
-};
-
/* Following defines are for the state of the buffers */
#define QLCNIC_BUFFER_FREE 0
#define QLCNIC_BUFFER_BUSY 1
/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
struct qlcnic_rx_buffer {
- struct list_head list;
+ u16 ref_handle;
struct sk_buff *skb;
+ struct list_head list;
u64 dma;
- u16 ref_handle;
};
/* Board types */
#define QLCNIC_GBE 0x01
#define QLCNIC_XGBE 0x02
+/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU and are
+ * adjusted based on the configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+
+#define QLCNIC_INTR_DEFAULT 0x04
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+
+struct qlcnic_nic_intr_coalesce {
+ u8 type;
+ u8 sts_ring_mask;
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 flag;
+ u32 timer_out;
+};
+
/*
* One hardware_context{} per adapter
* contains interrupt info as well as shared hardware info.
u8 linkup;
u16 port_type;
u16 board_type;
+
+ struct qlcnic_nic_intr_coalesce coal;
};
struct qlcnic_adapter_stats {
* be one Rcv Descriptor for normal packets, one for jumbo and may be others.
*/
struct qlcnic_host_rds_ring {
- u32 producer;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
u32 num_desc;
+ u32 producer;
u32 dma_size;
u32 skb_size;
u32 flags;
- void __iomem *crb_rcv_producer;
- struct rcv_desc *desc_head;
- struct qlcnic_rx_buffer *rx_buf_arr;
struct list_head free_list;
spinlock_t lock;
dma_addr_t phys_addr;
-};
+} ____cacheline_internodealigned_in_smp;
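+/* ring structs are cacheline-aligned so hot fields do not share lines across CPUs */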
struct qlcnic_host_sds_ring {
u32 consumer;
u32 num_desc;
void __iomem *crb_sts_consumer;
- void __iomem *crb_intr_mask;
struct status_desc *desc_head;
struct qlcnic_adapter *adapter;
struct napi_struct napi;
struct list_head free_list[NUM_RCV_DESC_RINGS];
+ void __iomem *crb_intr_mask;
int irq;
dma_addr_t phys_addr;
char name[IFNAMSIZ+4];
-};
+} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_tx_ring {
u32 producer;
- __le32 *hw_consumer;
u32 sw_consumer;
- void __iomem *crb_cmd_producer;
u32 num_desc;
-
- struct netdev_queue *txq;
-
- struct qlcnic_cmd_buffer *cmd_buf_arr;
+ void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ __le32 *hw_consumer;
+
dma_addr_t phys_addr;
dma_addr_t hw_cons_phys_addr;
-};
+ struct netdev_queue *txq;
+} ____cacheline_internodealigned_in_smp;
/*
* Receive context. There is one such structure per instance of the
* present elsewhere.
*/
struct qlcnic_recv_context {
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
u32 state;
u16 context_id;
u16 virt_port;
- struct qlcnic_host_rds_ring *rds_rings;
- struct qlcnic_host_sds_ring *sds_rings;
};
/* HW context creation */
#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
-#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
-#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
-#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
-#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
-#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
-#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
-#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
-#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
-#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023
#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
__le32 ring_size; /* Ring entries */
__le16 msi_index;
__le16 rsvd; /* Padding */
-};
+} __packed;
struct qlcnic_hostrq_rds_ring {
__le64 host_phys_addr; /* Ring base addr */
__le64 buff_size; /* Packet buffer size */
__le32 ring_size; /* Ring entries */
__le32 ring_kind; /* Class of ring */
-};
+} __packed;
struct qlcnic_hostrq_rx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
/* MUST BE 64-bit aligned.
The following is packed:
- N hostrq_rds_rings
- N hostrq_sds_rings */
char data[0];
-};
+} __packed;
struct qlcnic_cardrsp_rds_ring{
__le32 host_producer_crb; /* Crb to use */
__le32 rsvd1; /* Padding */
-};
+} __packed;
struct qlcnic_cardrsp_sds_ring {
__le32 host_consumer_crb; /* Crb to use */
__le32 interrupt_crb; /* Crb to use */
-};
+} __packed;
struct qlcnic_cardrsp_rx_ctx {
/* These ring offsets are relative to data[0] below */
/* MUST BE 64-bit aligned.
The following is packed:
- N cardrsp_rds_rings
- N cardrs_sds_rings */
char data[0];
-};
+} __packed;
#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
(sizeof(HOSTRQ_RX) + \
__le64 host_phys_addr; /* Ring base addr */
__le32 ring_size; /* Ring entries */
__le32 rsvd; /* Padding */
-};
+} __packed;
struct qlcnic_hostrq_tx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
__le16 rsvd3; /* Padding */
struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
u8 reserved[128]; /* future expansion */
-};
+} __packed;
struct qlcnic_cardrsp_cds_ring {
__le32 host_producer_crb; /* Crb to use */
__le32 interrupt_crb; /* Crb to use */
-};
+} __packed;
struct qlcnic_cardrsp_tx_ctx {
__le32 host_ctx_state; /* Starting state */
u8 virt_port; /* Virtual/Logical id of port */
struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
u8 reserved[128]; /* future expansion */
-};
+} __packed;
#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
uint8_t mac_addr[ETH_ALEN+2];
};
-/*
- * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
- * adjusted based on configured MTU.
- */
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
-#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
-#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
-
-#define QLCNIC_INTR_DEFAULT 0x04
-
-union qlcnic_nic_intr_coalesce_data {
- struct {
- u16 rx_packets;
- u16 rx_time_us;
- u16 tx_packets;
- u16 tx_time_us;
- } data;
- u64 word;
-};
-
-struct qlcnic_nic_intr_coalesce {
- u16 stats_time_us;
- u16 rate_sample_time;
- u16 flags;
- u16 rsvd_1;
- u32 low_threshold;
- u32 high_threshold;
- union qlcnic_nic_intr_coalesce_data normal;
- union qlcnic_nic_intr_coalesce_data low;
- union qlcnic_nic_intr_coalesce_data high;
- union qlcnic_nic_intr_coalesce_data irq;
-};
-
#define QLCNIC_HOST_REQUEST 0x13
#define QLCNIC_REQUEST 0x14
/*
* Driver --> Firmware
*/
-#define QLCNIC_H2C_OPCODE_START 0
-#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
-#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
-#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
-#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
-#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
-#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
-#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
-#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
-#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
-#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
-#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
-#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
-#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
-#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
-#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
-#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
-#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
-#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
-#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
-#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
-#define QLCNIC_C2C_OPCODE 22
-#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
-#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
-#define QLCNIC_H2C_OPCODE_LAST 25
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
+#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
/*
* Firmware --> Driver
*/
-#define QLCNIC_C2H_OPCODE_START 128
-#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
-#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
-#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
-#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
-#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
-#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
-#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
-#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
-#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
-#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
-#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
-#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
-#define QLCNIC_C2H_OPCODE_LAST 142
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
__le64 qhdr;
__le64 req_hdr;
__le64 words[6];
-};
+} __packed;
struct qlcnic_mac_req {
u8 op;
struct qlcnic_vlan_req {
__le16 vlan_id;
__le16 rsvd[3];
-};
+} __packed;
struct qlcnic_ipaddr {
__be32 ipv4;
};
struct qlcnic_adapter {
- struct qlcnic_hardware_context ahw;
-
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
struct net_device *netdev;
struct pci_dev *pdev;
- struct list_head mac_list;
- spinlock_t tx_clean_lock;
- spinlock_t mac_learn_lock;
+ bool blink_was_down;
+ unsigned long state;
+ u32 flags;
u16 num_txd;
u16 num_rxd;
u8 mc_enabled;
u8 max_mc_count;
- u8 rss_supported;
u8 fw_wait_cnt;
u8 fw_fail_cnt;
u8 tx_timeo_cnt;
u32 fw_hal_version;
u32 capabilities;
- u32 flags;
u32 irq;
u32 temp;
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
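+ /* bitmap of active VLAN ids, replacing the old vlan_group pointer */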
+ unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct vlan_group *vlgrp;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
struct qlcnic_adapter_stats stats;
-
- struct qlcnic_recv_context recv_ctx;
- struct qlcnic_host_tx_ring *tx_ring;
+ struct list_head mac_list;
void __iomem *tgt_mask_reg;
void __iomem *tgt_status_reg;
struct delayed_work fw_work;
- struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_filter_hash fhash;
- unsigned long state;
+ spinlock_t tx_clean_lock;
+ spinlock_t mac_learn_lock;
__le32 file_prd_off; /*File fw product offset*/
u32 fw_version;
const struct firmware *fw;
__le16 min_tx_bw;
__le16 max_tx_bw;
u8 reserved2[104];
-};
+} __packed;
struct qlcnic_pci_info {
__le16 id; /* pci function id */
u8 mac[ETH_ALEN];
u8 reserved2[106];
-};
+} __packed;
struct qlcnic_npar_info {
u16 pvid;
__le64 local_frames;
__le64 numbytes;
__le64 rsvd[3];
-};
+} __packed;
struct qlcnic_esw_statistics {
struct __qlcnic_esw_statistics rx;
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
-void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring);
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
{
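+ /* a momentarily stale producer/consumer view is acceptable here;
+ * updates are ordered with smp_mb() at the update sites
+ */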
- smp_mb();
- if (tx_ring->producer < tx_ring->sw_consumer)
+ if (likely(tx_ring->producer < tx_ring->sw_consumer))
return tx_ring->sw_consumer - tx_ring->producer;
else
return tx_ring->sw_consumer + tx_ring->num_desc -
#include <linux/swab.h>
#include <linux/dma-mapping.h>
-#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+static void qlcnic_vlan_rx_add(struct net_device *, u16);
+static void qlcnic_vlan_rx_del(struct net_device *, u16);
+
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
-void
+inline void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
- qlcnic_free_sds_rings(&adapter->recv_ctx);
+ qlcnic_free_sds_rings(adapter->recv_ctx);
}
static void
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
return 0;
}
-static void qlcnic_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- adapter->vlgrp = grp;
-}
-
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
.ndo_set_mac_address = qlcnic_set_mac,
.ndo_change_mtu = qlcnic_change_mtu,
.ndo_tx_timeout = qlcnic_tx_timeout,
- .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
struct pci_dev *pdev = adapter->pdev;
int err, num_msix;
- if (adapter->rss_supported) {
+ if (adapter->msix_supported) {
num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
MSIX_ENTRIES_PER_ADAPTER : 2;
} else
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
- legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+ legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
adapter->int_vec_bit = legacy_intrp->int_vec_bit;
adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
adapter->flags |= QLCNIC_MSIX_ENABLED;
qlcnic_set_msix_bit(pdev, 1);
- if (adapter->rss_supported)
- adapter->max_sds_rings = num_msix;
+ adapter->max_sds_rings = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
return;
if (use_msi && !pci_enable_msi(pdev)) {
adapter->flags |= QLCNIC_MSI_ENABLED;
adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
- msi_tgt_status[adapter->ahw.pci_func]);
+ msi_tgt_status[adapter->ahw->pci_func]);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
return;
static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
- if (adapter->ahw.pci_base0 != NULL)
- iounmap(adapter->ahw.pci_base0);
+ if (adapter->ahw->pci_base0 != NULL)
+ iounmap(adapter->ahw->pci_base0);
}
static int
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
+ if (pfn > QLCNIC_MAX_PCI_FUNC) {
+ ret = QL_STATUS_INVALID_PARAM;
+ goto err_eswitch;
+ }
adapter->npars[pfn].active = (u8)pci_info[i].active;
adapter->npars[pfn].type = (u8)pci_info[i].type;
adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
u32 ref_count;
int i, ret = 1;
u32 data = QLCNIC_MGMT_FUNC;
- void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
/* If other drivers are not in use set their privilege level */
ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
id = i;
if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
- id == adapter->ahw.pci_func)
+ id == adapter->ahw->pci_func)
continue;
data |= (qlcnic_config_npars &
QLC_DEV_SET_DRV(0xf, id));
}
} else {
data = readl(priv_op);
- data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
+ data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
- adapter->ahw.pci_func));
+ adapter->ahw->pci_func));
}
writel(data, priv_op);
qlcnic_api_unlock(adapter);
u32 op_mode, priv_level;
/* Determine FW API version */
- adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
+ adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
+ QLCNIC_FW_API);
/* Find PCI function number */
pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
- msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
msix_base = readl(msix_base_addr);
func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
- adapter->ahw.pci_func = func;
+ adapter->ahw->pci_func = func;
/* Determine function privilege level */
- priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
op_mode = readl(priv_op);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
- priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (priv_level == QLCNIC_NON_PRIV_FUNC) {
adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
- adapter->ahw.pci_base0 = mem_ptr0;
- adapter->ahw.pci_len0 = pci_len0;
+ adapter->ahw->pci_base0 = mem_ptr0;
+ adapter->ahw->pci_len0 = pci_len0;
qlcnic_check_vf(adapter);
- adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
+ adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
+ adapter->ahw->pci_func)));
return 0;
}
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
- if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- } else if (adapter->ahw.port_type == QLCNIC_GBE) {
+ } else if (adapter->ahw->port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = !!use_msi_x;
- adapter->rss_supported = !!use_msi_x;
adapter->num_txd = MAX_CMD_DESCRIPTORS;
int err;
struct qlcnic_info nic_info;
- err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
+ err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
if (err)
return err;
adapter->pvid = 0;
}
+static void
+qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ set_bit(vid, adapter->vlans);
+}
+
+static void
+qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
+ clear_bit(vid, adapter->vlans);
+}
+
static void
qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return 0;
- esw_cfg.pci_func = adapter->ahw.pci_func;
+ esw_cfg.pci_func = adapter->ahw->pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
return -EIO;
qlcnic_set_vlan_config(adapter, &esw_cfg);
features = (NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO);
vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
+ NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
features |= (NETIF_F_TSO | NETIF_F_TSO6);
vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+
+ if (netdev->features & NETIF_F_LRO)
features |= NETIF_F_LRO;
if (esw_cfg->offload_flags & BIT_0) {
if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
return 0;
- priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
op_mode = readl(priv_op);
- priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
- priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
if (priv_level == QLCNIC_MGMT_FUNC) {
unsigned long flags = 0;
struct net_device *netdev = adapter->netdev;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
handler = qlcnic_tmp_intr;
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
}
}
-static void
-qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
-{
- adapter->coal.flags = QLCNIC_INTR_DEFAULT;
- adapter->coal.normal.data.rx_time_us =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
- adapter->coal.normal.data.rx_packets =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
- adapter->coal.normal.data.tx_time_us =
- QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
- adapter->coal.normal.data.tx_packets =
- QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
-}
-
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
return -EIO;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
- rds_ring = &adapter->recv_ctx.rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring);
}
qlcnic_set_multi(netdev);
qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
- adapter->ahw.linkup = 0;
+ adapter->ahw->linkup = 0;
if (adapter->max_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
goto err_out_free_hw;
}
- qlcnic_init_coalesce_defaults(adapter);
-
qlcnic_create_sysfs_entries(adapter);
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
}
}
netif_device_attach(netdev);
}
+static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
+ GFP_KERNEL);
+ if (!adapter->ahw) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate recv ctx resources for adapter\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
+ GFP_KERNEL);
+ if (!adapter->recv_ctx) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate recv ctx resources for adapter\n");
+ kfree(adapter->ahw);
+ adapter->ahw = NULL;
+ err = -ENOMEM;
+ goto err_out;
+ }
+ /* Initialize interrupt coalesce parameters */
+ adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+ adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+err_out:
+ return err;
+}
+
+static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ kfree(adapter->recv_ctx);
+ adapter->recv_ctx = NULL;
+
+ kfree(adapter->ahw);
+ adapter->ahw = NULL;
+}
+
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
- rds_ring = &adapter->recv_ctx.rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring);
}
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_int(sds_ring);
}
}
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
+ NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
- adapter->dev_rst_time = jiffies;
+ if (qlcnic_alloc_adapter_resources(adapter))
+ goto err_out_free_netdev;
+
+ adapter->dev_rst_time = jiffies;
revision_id = pdev->revision;
- adapter->ahw.revision_id = revision_id;
+ adapter->ahw->revision_id = revision_id;
- rwlock_init(&adapter->ahw.crb_lock);
- mutex_init(&adapter->ahw.mem_lock);
+ rwlock_init(&adapter->ahw->crb_lock);
+ mutex_init(&adapter->ahw->mem_lock);
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
err = qlcnic_setup_pci_map(adapter);
if (err)
- goto err_out_free_netdev;
+ goto err_out_free_hw;
/* This will be reset for mezz cards */
- adapter->portnum = adapter->ahw.pci_func;
+ adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_get_board_info(adapter);
if (err) {
pr_info("%s: %s Board Chip rev 0x%x\n",
module_name(THIS_MODULE),
- brd_name, adapter->ahw.revision_id);
+ brd_name, adapter->ahw->revision_id);
}
qlcnic_clear_stats(adapter);
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
- switch (adapter->ahw.port_type) {
+ switch (adapter->ahw->port_type) {
case QLCNIC_GBE:
dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
adapter->netdev->name);
err_out_iounmap:
qlcnic_cleanup_pci_map(adapter);
+err_out_free_hw:
+ qlcnic_free_adapter_resources(adapter);
+
err_out_free_netdev:
free_netdev(netdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ qlcnic_free_adapter_resources(adapter);
free_netdev(netdev);
}
static int __qlcnic_shutdown(struct pci_dev *pdev)
vlan_req->vlan_id = vlan_id;
tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
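+ /* make the producer update visible to readers such as qlcnic_tx_avail() */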
+ smp_mb();
}
#define QLCNIC_MAC_HASH(MAC)\
spin_unlock(&adapter->mac_learn_lock);
}
-static void
-qlcnic_tso_check(struct net_device *netdev,
- struct qlcnic_host_tx_ring *tx_ring,
+static int
+qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
- u8 opcode = TX_ETHER_PKT;
- __be16 protocol = skb->protocol;
- u16 flags = 0;
- int copied, offset, copy_len, hdr_len = 0, tso = 0;
+ u8 opcode = 0, hdr_len = 0;
+ u16 flags = 0, vlan_tci = 0;
+ int copied, offset, copy_len;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ u16 protocol = ntohs(skb->protocol);
u32 producer = tx_ring->producer;
- __le16 vlan_oob = first_desc->flags_opcode &
- cpu_to_le16(FLAGS_VLAN_OOB);
+
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ flags = FLAGS_VLAN_TAGGED;
+ vlan_tci = vh->h_vlan_TCI;
+ } else if (vlan_tx_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = vlan_tx_tag_get(skb);
+ }
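+ /* with a port VLAN (pvid) configured, tagged tx is allowed only when
+ * tagging is enabled; untagged frames are sent with the pvid instead
+ */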
+ if (unlikely(adapter->pvid)) {
+ if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = adapter->pvid;
+ }
+set_flags:
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
if (*(skb->data) & BIT_0) {
flags |= BIT_0;
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
-
- if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ opcode = TX_ETHER_PKT;
+ if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
- if (vlan_oob) {
+
+ opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring */
+ copied = 0;
+ offset = 2;
+
+ if (flags & FLAGS_VLAN_OOB) {
first_desc->total_hdr_length += VLAN_HLEN;
first_desc->tcp_hdr_offset = VLAN_HLEN;
first_desc->ip_hdr_offset = VLAN_HLEN;
/* Only in case of TSO on vlan device */
flags |= FLAGS_VLAN_TAGGED;
+
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vlan_tci);
+
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
}
- opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
- TX_TCP_LSO6 : TX_TCP_LSO;
- tso = 1;
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *) hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ smp_mb();
+ adapter->stats.lso_frames++;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4proto;
- if (protocol == cpu_to_be16(ETH_P_IP)) {
+ if (protocol == ETH_P_IP) {
l4proto = ip_hdr(skb)->protocol;
if (l4proto == IPPROTO_TCP)
opcode = TX_TCP_PKT;
else if (l4proto == IPPROTO_UDP)
opcode = TX_UDP_PKT;
- } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ } else if (protocol == ETH_P_IPV6) {
l4proto = ipv6_hdr(skb)->nexthdr;
if (l4proto == IPPROTO_TCP)
opcode = TX_TCPV6_PKT;
else if (l4proto == IPPROTO_UDP)
opcode = TX_UDPV6_PKT;
}
}
-
first_desc->tcp_hdr_offset += skb_transport_offset(skb);
first_desc->ip_hdr_offset += skb_network_offset(skb);
qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
- if (!tso)
- return;
-
- /* For LSO, we need to copy the MAC/IP/TCP headers into
- * the descriptor ring
- */
- copied = 0;
- offset = 2;
-
- if (vlan_oob) {
- /* Create a TSO vlan header template for firmware */
-
- hwdesc = &tx_ring->desc_head[producer];
- tx_ring->cmd_buf_arr[producer].skb = NULL;
-
- copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
- hdr_len + VLAN_HLEN);
-
- vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
- skb_copy_from_linear_data(skb, vh, 12);
- vh->h_vlan_proto = htons(ETH_P_8021Q);
- vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
-
- skb_copy_from_linear_data_offset(skb, 12,
- (char *)vh + 16, copy_len - 16);
-
- copied = copy_len - VLAN_HLEN;
- offset = 0;
-
- producer = get_next_index(producer, tx_ring->num_desc);
- }
-
- while (copied < hdr_len) {
-
- copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
- (hdr_len - copied));
-
- hwdesc = &tx_ring->desc_head[producer];
- tx_ring->cmd_buf_arr[producer].skb = NULL;
-
- skb_copy_from_linear_data_offset(skb, copied,
- (char *)hwdesc + offset, copy_len);
-
- copied += copy_len;
- offset = 0;
-
- producer = get_next_index(producer, tx_ring->num_desc);
- }
-
- tx_ring->producer = producer;
- barrier();
- adapter->stats.lso_frames++;
+ return 0;
}
static int
return -ENOMEM;
}
-static int
-qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
- struct sk_buff *skb,
- struct cmd_desc_type0 *first_desc)
+static void
+qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
{
- u8 opcode = 0;
- u16 flags = 0;
- __be16 protocol = skb->protocol;
- struct vlan_ethhdr *vh;
+ struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int i;
- if (protocol == cpu_to_be16(ETH_P_8021Q)) {
- vh = (struct vlan_ethhdr *)skb->data;
- protocol = vh->h_vlan_encapsulated_proto;
- flags = FLAGS_VLAN_TAGGED;
- qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
- } else if (vlan_tx_tag_present(skb)) {
- flags = FLAGS_VLAN_OOB;
- qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
+ for (i = 0; i < nr_frags; i++) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
}
- if (unlikely(adapter->pvid)) {
- if (first_desc->vlan_TCI &&
- !(adapter->flags & QLCNIC_TAGGING_ENABLED))
- return -EIO;
- if (first_desc->vlan_TCI &&
- (adapter->flags & QLCNIC_TAGGING_ENABLED))
- goto set_flags;
- flags = FLAGS_VLAN_OOB;
- qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
- }
-set_flags:
- qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
- return 0;
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
}
static inline void
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
struct ethhdr *phdr;
+ int delta = 0;
int i, k;
u32 producer;
- int frag_count, no_of_desc;
+ int frag_count;
u32 num_txd = tx_ring->num_desc;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
}
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_shinfo(skb)->frags[i].size;
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
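+ /* after the pull, frag_count is within QLCNIC_MAX_FRAGS_PER_TX */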
- /* 4 fragments per cmd des */
- no_of_desc = (frag_count + 3) >> 2;
-
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
- smp_mb();
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
netif_start_queue(netdev);
else {
first_desc = hwdesc = &tx_ring->desc_head[producer];
qlcnic_clear_cmddesc((u64 *)hwdesc);
- if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
- goto drop_packet;
-
if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
adapter->stats.tx_dma_map_error++;
goto drop_packet;
}
tx_ring->producer = get_next_index(producer, num_txd);
+ smp_mb();
- qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ goto unwind_buff;
if (qlcnic_mac_learn)
qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
return NETDEV_TX_OK;
+unwind_buff:
+ qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
adapter->stats.txdropped++;
dev_kfree_skb_any(skb);
{
struct net_device *netdev = adapter->netdev;
- if (adapter->ahw.linkup && !linkup) {
+ if (adapter->ahw->linkup && !linkup) {
netdev_info(netdev, "NIC Link is down\n");
- adapter->ahw.linkup = 0;
+ adapter->ahw->linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
- } else if (!adapter->ahw.linkup && linkup) {
+ } else if (!adapter->ahw->linkup && linkup) {
netdev_info(netdev, "NIC Link is up\n");
- adapter->ahw.linkup = 1;
+ adapter->ahw->linkup = 1;
if (netif_running(netdev)) {
netif_carrier_on(netdev);
netif_wake_queue(netdev);
int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
disable_irq(adapter->irq);
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
u8 pci_func;
int i;
- op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
+ op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
- if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
+ if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
op_mode = esw_cfg[i].op_mode;
qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
esw_cfg[i].op_mode = op_mode;
- esw_cfg[i].pci_func = adapter->ahw.pci_func;
+ esw_cfg[i].pci_func = adapter->ahw->pci_func;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_pci_config))
+ dev_info(dev, "failed to create pci config sysfs entry");
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
if (device_create_bin_file(dev, &bin_attr_esw_config))
dev_info(dev, "failed to create esw config sysfs entry");
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
- if (device_create_bin_file(dev, &bin_attr_pci_config))
- dev_info(dev, "failed to create pci config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_npar_config))
dev_info(dev, "failed to create npar config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_pm_config))
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
+ device_remove_bin_file(dev, &bin_attr_pci_config);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
device_remove_bin_file(dev, &bin_attr_esw_config);
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
- device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_bin_file(dev, &bin_attr_npar_config);
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
qlcnic_config_indev_addr(adapter, netdev, event);
- if (!adapter->vlgrp)
- return;
-
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- dev = vlan_group_get_device(adapter->vlgrp, vid);
+ for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
+ dev = vlan_find_dev(netdev, vid);
if (!dev)
continue;
-
qlcnic_config_indev_addr(adapter, dev, event);
}
}
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
+ BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
+ if (efx_dev_registered(efx) && !efx->port_inhibited)
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)
/* Otherwise efx_start_port() will do this */
}
+static int efx_set_features(struct net_device *net_dev, u32 data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE)
+ efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+
+ return 0;
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_multicast_list = efx_set_multicast_list,
+ .ndo_set_features = efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
return false;
}
-static struct efx_phy_operations efx_dummy_phy_operations = {
+static const struct efx_phy_operations efx_dummy_phy_operations = {
.init = efx_port_dummy_op_int,
.reconfigure = efx_port_dummy_op_int,
.poll = efx_port_dummy_op_poll,
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
int i;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
- efx->rx_checksum_enabled = true;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
- struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
+ const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
int i, rc;
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
- NETIF_F_GRO);
+ NETIF_F_RXCSUM);
if (type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_TSO);
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+ /* All offloads can be toggled */
+ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
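+ /* NETIF_F_HIGHDMA is left fixed; it reflects the DMA mask, not a toggle */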
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
* @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
- * @rx_checksum_enabled: RX checksumming enabled
* @stats_buffer: DMA buffer for statistics
* @mac_op: MAC interface
* @phy_type: PHY type
bool port_initialized;
struct net_device *net_dev;
- bool rx_checksum_enabled;
struct efx_buffer stats_buffer;
- struct efx_mac_operations *mac_op;
+ const struct efx_mac_operations *mac_op;
unsigned int phy_type;
- struct efx_phy_operations *phy_op;
+ const struct efx_phy_operations *phy_op;
void *phy_data;
struct mdio_if_info mdio;
unsigned int mdio_bus;
void (*resume_wol)(struct efx_nic *efx);
int (*test_registers)(struct efx_nic *efx);
int (*test_nvram)(struct efx_nic *efx);
- struct efx_mac_operations *default_mac_ops;
+ const struct efx_mac_operations *default_mac_ops;
int revision;
unsigned int mem_map_size;
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
- return ((efx_qword_t *) (channel->eventq.addr)) + index;
+ return ((efx_qword_t *) (channel->eventq.addr)) +
+ (index & channel->eventq_mask);
}
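/* Both eventq_read_ptr and caller-supplied indices may now be
 * free-running 32-bit counters; masking at access relies on the ring
 * size being a power of two.  A worked illustration, assuming 512
 * entries (eventq_mask = 0x1ff):
 *
 *	index 0x000001fe -> slot 0x1fe
 *	index 0xffffffff -> slot 0x1ff
 *	index 0x00000001 (after wrap) -> slot 0x001
 *
 * Because the counters wrap, comparisons elsewhere must use equality
 * tests, never relational operators.
 */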
/* See if an event is present
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
channel->channel);
}
unsigned expected_ptr;
bool rx_ev_pkt_ok, discard = false, checksummed;
struct efx_rx_queue *rx_queue;
- struct efx_nic *efx = channel->efx;
/* Basic packet information */
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
* UDP/IP, then we can rely on the hardware checksum.
*/
checksummed =
- likely(efx->rx_checksum_enabled) &&
- (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
- rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
} else {
efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
checksummed = false;
code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
if (code == EFX_CHANNEL_MAGIC_TEST(channel))
- ++channel->magic_count;
+ ; /* ignore */
else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
return spent;
}
+ /* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+ bool efx_nic_event_present(struct efx_channel *channel)
+ {
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+ }
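/* A plausible self-test caller, sketched under the assumption that the
 * test injects one magic event and only needs to observe its arrival
 * without consuming the queue:
 */
static int efx_test_eventq_irq_sketch(struct efx_channel *channel)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (efx_nic_event_present(channel))
			return 0;	/* event was delivered */
		msleep(1);
	}
	return -ETIMEDOUT;		/* eventq appears stuck */
}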
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+ unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
int wol_filter_id;
};
-extern struct efx_nic_type falcon_a1_nic_type;
-extern struct efx_nic_type falcon_b0_nic_type;
-extern struct efx_nic_type siena_a0_nic_type;
+extern const struct efx_nic_type falcon_a1_nic_type;
+extern const struct efx_nic_type falcon_b0_nic_type;
+extern const struct efx_nic_type siena_a0_nic_type;
/**************************************************************************
*
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+ extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
}
EXPORT_SYMBOL(ath9k_hw_wait);
+void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
+ int column, unsigned int *writecnt)
+{
+ int r;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+ for (r = 0; r < array->ia_rows; r++) {
+ REG_WRITE(ah, INI_RA(array, r, 0),
+ INI_RA(array, r, column));
+ DO_DELAY(*writecnt);
+ }
+ REGWRITE_BUFFER_FLUSH(ah);
+}
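/* Hedged usage note: callers pick the table column from the current
 * channel mode and thread one running write counter through every
 * array so DO_DELAY() can pace long register bursts, roughly:
 *
 *	unsigned int reg_writes = 0;
 *
 *	ath9k_hw_write_array(ah, &ah->iniModes, modes_index, &reg_writes);
 *	ath9k_hw_write_array(ah, &ah->iniCommon, 1, &reg_writes);
 */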
+
u32 ath9k_hw_reverse_bits(u32 val, u32 n)
{
u32 retval;
return retval;
}
-bool ath9k_get_channel_edges(struct ath_hw *ah,
- u16 flags, u16 *low,
- u16 *high)
-{
- struct ath9k_hw_capabilities *pCap = &ah->caps;
-
- if (flags & CHANNEL_5GHZ) {
- *low = pCap->low_5ghz_chan;
- *high = pCap->high_5ghz_chan;
- return true;
- }
- if ((flags & CHANNEL_2GHZ)) {
- *low = pCap->low_2ghz_chan;
- *high = pCap->high_2ghz_chan;
- return true;
- }
- return false;
-}
-
u16 ath9k_hw_computetxtime(struct ath_hw *ah,
u8 phy, int kbps,
u32 frameLen, u16 rateix,
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
- if (ah->hw_version.devid != AR2427_DEVID_PCIE)
- ah->config.ht_enable = 1;
- else
- ah->config.ht_enable = 0;
-
/* PAPRD needs some more work to be enabled */
ah->config.paprd_disable = 1;
ah->sta_id1_defaults =
AR_STA_ID1_CRPT_MIC_ENABLE |
AR_STA_ID1_MCAST_KSRCH;
+ if (AR_SREV_9100(ah))
+ ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = 20;
ah->globaltxtimeout = (u32) -1;
unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
{
- REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
- udelay(100);
- REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
+ REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
+ udelay(100);
+ REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
- while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
- udelay(100);
+ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+ udelay(100);
- return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+ return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
}
EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
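/* REG_CLR_BIT()/REG_SET_BIT() above, and REG_RMW() used elsewhere in
 * this series, are read-modify-write helpers over REG_READ()/REG_WRITE().
 * A sketch of the semantics they collapse to (the real macros live in
 * ath9k's hw.h and may buffer the writes):
 */
#define REG_RMW_SKETCH(_ah, _reg, _set, _clr) \
	REG_WRITE(_ah, _reg, (REG_READ(_ah, _reg) & ~(_clr)) | (_set))
#define REG_SET_BIT_SKETCH(_ah, _reg, _mask) \
	REG_RMW_SKETCH(_ah, _reg, _mask, 0)
#define REG_CLR_BIT_SKETCH(_ah, _reg, _mask) \
	REG_RMW_SKETCH(_ah, _reg, 0, _mask)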
ah->misc_mode);
if (ah->misc_mode != 0)
- REG_WRITE(ah, AR_PCU_MISC,
- REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
+ REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
sifstime = 16;
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- u32 regval;
ENABLE_REGWRITE_BUFFER(ah);
/*
* set AHB_MODE not to do cacheline prefetches
*/
- if (!AR_SREV_9300_20_OR_LATER(ah)) {
- regval = REG_READ(ah, AR_AHB_MODE);
- REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
- }
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);
/*
* let mac dma reads be in 128 byte chunks
*/
- regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
- REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
+ REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);
REGWRITE_BUFFER_FLUSH(ah);
/*
* let mac dma writes be in 128 byte chunks
*/
- regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
- REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
+ REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);
/*
* Setup receive FIFO threshold to hold off TX activities
static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
{
- u32 val;
+ u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
+ u32 set = AR_STA_ID1_KSRCH_MODE;
- val = REG_READ(ah, AR_STA_ID1);
- val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
switch (opmode) {
- case NL80211_IFTYPE_AP:
- REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
- | AR_STA_ID1_KSRCH_MODE);
- REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
- break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
- REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
- | AR_STA_ID1_KSRCH_MODE);
+ set |= AR_STA_ID1_ADHOC;
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
+ case NL80211_IFTYPE_AP:
+ set |= AR_STA_ID1_STA_AP;
+ /* fall through */
case NL80211_IFTYPE_STATION:
- REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+ REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
default:
- if (ah->is_monitoring)
- REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
+ if (!ah->is_monitoring)
+ set = 0;
break;
}
+ REG_RMW(ah, AR_STA_ID1, set, mask);
}
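/* The rewrite funnels every interface type through the single REG_RMW()
 * above; per opmode it resolves to:
 *
 *	AP:         clear STA_AP|ADHOC, set STA_AP|KSRCH_MODE
 *	IBSS/mesh:  clear STA_AP|ADHOC, set ADHOC|KSRCH_MODE
 *	station:    clear STA_AP|ADHOC, set KSRCH_MODE
 *	monitor:    clear STA_AP|ADHOC, set KSRCH_MODE
 *	other:      clear STA_AP|ADHOC only ('set' is zeroed)
 */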
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
u32 tmpReg;
if (AR_SREV_9100(ah)) {
- u32 val = REG_READ(ah, AR_RTC_DERIVED_CLK);
- val &= ~AR_RTC_DERIVED_CLK_PERIOD;
- val |= SM(1, AR_RTC_DERIVED_CLK_PERIOD);
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, val);
+ REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
+ AR_RTC_DERIVED_CLK_PERIOD, 1);
(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
}
return true;
}
+static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
+{
+ u32 gpio_mask = ah->gpio_mask;
+ int i;
+
+ for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
+ if (!(gpio_mask & 1))
+ continue;
+
+ ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
+ }
+}
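/* Hedged example: a board quirk that must hold GPIO 1 high across every
 * chip reset would record, before ath9k_hw_reset() runs:
 *
 *	ah->gpio_mask = BIT(1);
 *	ah->gpio_val  = BIT(1);
 *
 * ath9k_hw_apply_gpio_override() then re-drives the pin at the end of
 * each reset.
 */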
+
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
- if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
- ath9k_hw_abortpcurecv(ah);
- if (!ath9k_hw_stopdmarecv(ah)) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Failed to stop receive dma\n");
- bChannelChange = false;
- }
- }
-
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
REGWRITE_BUFFER_FLUSH(ah);
ah->intr_txqs = 0;
- for (i = 0; i < ah->caps.total_queues; i++)
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
ath9k_hw_resettxqueue(ah, i);
ath9k_hw_init_interrupt_masks(ah, ah->opmode);
ar9002_hw_enable_wep_aggregation(ah);
}
- REG_WRITE(ah, AR_STA_ID1,
- REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
ath9k_hw_set_dma(ah);
if (AR_SREV_9300_20_OR_LATER(ah))
ar9003_hw_bb_watchdog_config(ah);
+ ath9k_hw_apply_gpio_override(ah);
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);
case NL80211_IFTYPE_MESH_POINT:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
- REG_WRITE(ah, AR_NEXT_NDP_TIMER,
- TU_TO_USEC(next_beacon +
- (ah->atim_window ? ah->
- atim_window : 1)));
+ REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
+ TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
flags |= AR_NDP_TIMER_EN;
case NL80211_IFTYPE_AP:
- REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
- REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
- TU_TO_USEC(next_beacon -
- ah->config.
- dma_beacon_response_time));
- REG_WRITE(ah, AR_NEXT_SWBA,
- TU_TO_USEC(next_beacon -
- ah->config.
- sw_beacon_response_time));
+ REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
+ REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
+ TU_TO_USEC(ah->config.dma_beacon_response_time));
+ REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
+ TU_TO_USEC(ah->config.sw_beacon_response_time));
flags |=
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
break;
}
- REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
- REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period));
- REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
- REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
+ REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
+ REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
+ REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
+ REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
REGWRITE_BUFFER_FLUSH(ah);
- beacon_period &= ~ATH9K_BEACON_ENA;
- if (beacon_period & ATH9K_BEACON_RESET_TSF) {
- ath9k_hw_reset_tsf(ah);
- }
-
REG_SET_BIT(ah, AR_TIMER_MODE, flags);
}
EXPORT_SYMBOL(ath9k_hw_beaconinit);
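/* The beacon timer registers above now take microseconds directly, so a
 * caller converts from TUs exactly once; expected call pattern (sketch,
 * using the existing 1 TU = 1024 us helper):
 *
 *	ath9k_hw_beaconinit(ah, TU_TO_USEC(nexttbtt),
 *			    TU_TO_USEC(intval));
 */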
!(AR_SREV_9271(ah)))
/* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
+ else if (AR_SREV_9100(ah))
+ pCap->rx_chainmask = 0x7;
else
/* Use rx_chainmask from EEPROM. */
pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
if (AR_SREV_9300_20_OR_LATER(ah))
ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
- pCap->low_2ghz_chan = 2312;
- pCap->high_2ghz_chan = 2732;
-
- pCap->low_5ghz_chan = 4920;
- pCap->high_5ghz_chan = 6100;
-
common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
- if (ah->config.ht_enable)
+ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
pCap->hw_caps |= ATH9K_HW_CAP_HT;
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
- if (capField & AR_EEPROM_EEPCAP_MAXQCU)
- pCap->total_queues =
- MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
- else
- pCap->total_queues = ATH9K_NUM_TX_QUEUES;
-
- if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
- pCap->keycache_size =
- 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
- else
- pCap->keycache_size = AR_KEYTABLE_SIZE;
-
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
- pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
- else
- pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
-
if (AR_SREV_9271(ah))
pCap->num_gpio_pins = AR9271_NUM_GPIO;
else if (AR_DEVID_7010(ah))
pCap->rts_aggr_limit = (8 * 1024);
}
- pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
-
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
else
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
- if (regulatory->current_rd_ext & (1 << REG_EXT_JAPAN_MIDBAND)) {
- pCap->reg_cap =
- AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
- AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
- AR_EEPROM_EEREGCAP_EN_KK_U2 |
- AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
- } else {
- pCap->reg_cap =
- AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
- AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
- }
-
- /* Advertise midband for AR5416 with FCC midband set in eeprom */
- if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
- AR_SREV_5416(ah))
- pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
-
if (AR_SREV_9280_20_OR_LATER(ah) && common->btcoex_enabled) {
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
REG_WRITE(ah, AR_PHY_ERR, phybits);
if (phybits)
- REG_WRITE(ah, AR_RXCFG,
- REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
+ REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
else
- REG_WRITE(ah, AR_RXCFG,
- REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
+ REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
REGWRITE_BUFFER_FLUSH(ah);
}
return timer_table->gen_timer_index[b];
}
-static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
+u32 ath9k_hw_gettsf32(struct ath_hw *ah)
{
return REG_READ(ah, AR_TSF_L32);
}
+EXPORT_SYMBOL(ath9k_hw_gettsf32);
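/* Un-staticed and exported so code outside hw.c can sample the low 32
 * bits of the TSF directly -- the hardware gen timers below, for
 * instance, are programmed against this counter.
 */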
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
{
u32 cw;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath9k_tx_queue_info *qi;
- if (q >= pCap->total_queues) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Set TXQ properties, invalid queue: %u\n", q);
- return false;
- }
-
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
ath_dbg(common, ATH_DBG_QUEUE,
struct ath9k_tx_queue_info *qinfo)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath9k_tx_queue_info *qi;
- if (q >= pCap->total_queues) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Get TXQ properties, invalid queue: %u\n", q);
- return false;
- }
-
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
ath_dbg(common, ATH_DBG_QUEUE,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info *qi;
- struct ath9k_hw_capabilities *pCap = &ah->caps;
int q;
switch (type) {
case ATH9K_TX_QUEUE_BEACON:
- q = pCap->total_queues - 1;
+ q = ATH9K_NUM_TX_QUEUES - 1;
break;
case ATH9K_TX_QUEUE_CAB:
- q = pCap->total_queues - 2;
+ q = ATH9K_NUM_TX_QUEUES - 2;
break;
case ATH9K_TX_QUEUE_PSPOLL:
q = 1;
break;
case ATH9K_TX_QUEUE_UAPSD:
- q = pCap->total_queues - 3;
+ q = ATH9K_NUM_TX_QUEUES - 3;
break;
case ATH9K_TX_QUEUE_DATA:
- for (q = 0; q < pCap->total_queues; q++)
+ for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
if (ah->txq[q].tqi_type ==
ATH9K_TX_QUEUE_INACTIVE)
break;
- if (q == pCap->total_queues) {
+ if (q == ATH9K_NUM_TX_QUEUES) {
ath_err(common, "No available TX queue\n");
return -1;
}
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
- struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info *qi;
- if (q >= pCap->total_queues) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Release TXQ, invalid queue: %u\n", q);
- return false;
- }
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
ath_dbg(common, ATH_DBG_QUEUE,
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
- struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ath9k_tx_queue_info *qi;
u32 cwMin, chanCwMin, value;
- if (q >= pCap->total_queues) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Reset TXQ, invalid queue: %u\n", q);
- return false;
- }
-
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
ath_dbg(common, ATH_DBG_QUEUE,
REG_WRITE(ah, AR_QCBRCFG(q),
SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
- REG_WRITE(ah, AR_QMISC(q),
- REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
- (qi->tqi_cbrOverflowLimit ?
- AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
+ REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
+ (qi->tqi_cbrOverflowLimit ?
+ AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
}
if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
REG_WRITE(ah, AR_QRDYTIMECFG(q),
(qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
if (qi->tqi_burstTime
- && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
- REG_WRITE(ah, AR_QMISC(q),
- REG_READ(ah, AR_QMISC(q)) |
- AR_Q_MISC_RDYTIME_EXP_POLICY);
+ && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
+ REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);
- }
-
- if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
- REG_WRITE(ah, AR_DMISC(q),
- REG_READ(ah, AR_DMISC(q)) |
- AR_D_MISC_POST_FR_BKOFF_DIS);
- }
+ if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
+ REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
REGWRITE_BUFFER_FLUSH(ah);
- if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
- REG_WRITE(ah, AR_DMISC(q),
- REG_READ(ah, AR_DMISC(q)) |
- AR_D_MISC_FRAG_BKOFF_EN);
- }
+ if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
+ REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);
+
switch (qi->tqi_type) {
case ATH9K_TX_QUEUE_BEACON:
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
- | AR_Q_MISC_FSP_DBA_GATED
- | AR_Q_MISC_BEACON_USE
- | AR_Q_MISC_CBR_INCR_DIS1);
+ REG_SET_BIT(ah, AR_QMISC(q),
+ AR_Q_MISC_FSP_DBA_GATED
+ | AR_Q_MISC_BEACON_USE
+ | AR_Q_MISC_CBR_INCR_DIS1);
- REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
- | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+ REG_SET_BIT(ah, AR_DMISC(q),
+ (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
- | AR_D_MISC_BEACON_USE
- | AR_D_MISC_POST_FR_BKOFF_DIS);
+ | AR_D_MISC_BEACON_USE
+ | AR_D_MISC_POST_FR_BKOFF_DIS);
REGWRITE_BUFFER_FLUSH(ah);
case ATH9K_TX_QUEUE_CAB:
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
- | AR_Q_MISC_FSP_DBA_GATED
- | AR_Q_MISC_CBR_INCR_DIS1
- | AR_Q_MISC_CBR_INCR_DIS0);
+ REG_SET_BIT(ah, AR_QMISC(q),
+ AR_Q_MISC_FSP_DBA_GATED
+ | AR_Q_MISC_CBR_INCR_DIS1
+ | AR_Q_MISC_CBR_INCR_DIS0);
value = (qi->tqi_readyTime -
(ah->config.sw_beacon_response_time -
ah->config.dma_beacon_response_time) -
ah->config.additional_swba_backoff) * 1024;
REG_WRITE(ah, AR_QRDYTIMECFG(q),
value | AR_Q_RDYTIMECFG_EN);
- REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
- | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
+ REG_SET_BIT(ah, AR_DMISC(q),
+ (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
REGWRITE_BUFFER_FLUSH(ah);
break;
case ATH9K_TX_QUEUE_PSPOLL:
- REG_WRITE(ah, AR_QMISC(q),
- REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
+ REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
break;
case ATH9K_TX_QUEUE_UAPSD:
- REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
- AR_D_MISC_POST_FR_BKOFF_DIS);
+ REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
break;
default:
break;
}
if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
- REG_WRITE(ah, AR_DMISC(q),
- REG_READ(ah, AR_DMISC(q)) |
- SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
- AR_D_MISC_ARB_LOCKOUT_CNTRL) |
- AR_D_MISC_POST_FR_BKOFF_DIS);
+ REG_SET_BIT(ah, AR_DMISC(q),
+ SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
+ AR_D_MISC_ARB_LOCKOUT_CNTRL) |
+ AR_D_MISC_POST_FR_BKOFF_DIS);
}
if (AR_SREV_9300_20_OR_LATER(ah))
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
- bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
+ bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
-#define AH_RX_TIME_QUANTUM 100 /* usec */
struct ath_common *common = ath9k_hw_common(ah);
+ u32 mac_status, last_mac_status = 0;
int i;
+ /* Enable access to the DMA observation bus */
+ REG_WRITE(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
REG_WRITE(ah, AR_CR, AR_CR_RXD);
/* Wait for rx enable bit to go low */
for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
break;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+ if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+ *reset = true;
+ break;
+ }
+
+ last_mac_status = mac_status;
+ }
+
udelay(AH_TIME_QUANTUM);
}
if (i == 0) {
ath_err(common,
- "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
AH_RX_STOP_DMA_TIMEOUT / 1000,
REG_READ(ah, AR_CR),
- REG_READ(ah, AR_DIAG_SW));
+ REG_READ(ah, AR_DIAG_SW),
+ REG_READ(ah, AR_DMADBG_7));
return false;
} else {
return true;
}
-#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
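/* Reading of the new logic: on pre-AR9300 parts the loop samples the rx
 * DMA observation bus (AR_DMADBG_7); the value 0x1c0 seen on two
 * consecutive polls is treated as the signature of a wedged DMA engine,
 * so the function bails out early with *reset set, letting the caller
 * treat the stop as needing a chip reset rather than a hard failure.
 */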
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- bool stopped;
+ bool stopped, reset = false;
spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
- stopped = ath9k_hw_stopdmarecv(ah);
+ stopped = ath9k_hw_stopdmarecv(ah, &reset);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
"confusing the DMA engine when we start RX up\n");
ATH_DBG_WARN_ON_ONCE(!stopped);
}
- return stopped;
+ return stopped || reset;
}
void ath_flushrecv(struct ath_softc *sc)
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, ATH_DBG_PS,
"Reconfigure Beacon timers based on timestamp from the AP\n");
- ath_beacon_config(sc, NULL);
+ ath_set_beacon(sc);
}
if (ath_beacon_dtim_pending_cab(skb)) {
__this_cpu_inc(softnet_data.processed);
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+ skb = vlan_untag(skb);
+ if (unlikely(!skb))
+ goto out;
+ }
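/* vlan_untag() covers NICs that did not strip the 802.1Q header in
 * hardware: it moves the tag from the packet data into skb->vlan_tci
 * (freeing the skb on failure, hence the NULL check) and leaves
 * skb->protocol holding the encapsulated protocol.
 */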
+
#ifdef CONFIG_NET_CLS_ACT
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = NULL;
}
- if (vlan_hwaccel_do_receive(&skb)) {
+ if (vlan_do_receive(&skb)) {
ret = __netif_receive_skb(skb);
goto out;
} else if (unlikely(!skb))
}
/* TSO requires that SG is present as well. */
- if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
- features &= ~NETIF_F_TSO;
+ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+ netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+ features &= ~NETIF_F_ALL_TSO;
}
+ /* TSO ECN requires that TSO is present as well. */
+ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+ features &= ~NETIF_F_TSO_ECN;
+
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
}
EXPORT_SYMBOL(netdev_fix_features);
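/* A worked example of the dependency fix-ups above, assuming a driver
 * that requests TSO variants without scatter-gather:
 *
 *	features = NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
 *	features = netdev_fix_features(dev, features);
 *
 * The NETIF_F_ALL_TSO test clears all three variants because
 * NETIF_F_SG is absent; the TSO-ECN rule would likewise drop a
 * NETIF_F_TSO_ECN left standing without plain TSO.
 */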
-void netdev_update_features(struct net_device *dev)
+int __netdev_update_features(struct net_device *dev)
{
u32 features;
int err = 0;
+ ASSERT_RTNL();
+
features = netdev_get_wanted_features(dev);
if (dev->netdev_ops->ndo_fix_features)
features = netdev_fix_features(dev, features);
if (dev->features == features)
- return;
+ return 0;
netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
dev->features, features);
if (dev->netdev_ops->ndo_set_features)
err = dev->netdev_ops->ndo_set_features(dev, features);
- if (!err)
- dev->features = features;
- else if (err < 0)
+ if (unlikely(err < 0)) {
netdev_err(dev,
"set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
err, features, dev->features);
+ return -1;
+ }
+
+ if (!err)
+ dev->features = features;
+
+ return 1;
+}
+
+void netdev_update_features(struct net_device *dev)
+{
+ if (__netdev_update_features(dev))
+ netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
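/* Hedged usage sketch: a driver whose ndo_fix_features() result depends
 * on private state calls the public wrapper after that state changes;
 * the ASSERT_RTNL() above makes the locking requirement explicit.
 */
static void example_reconfigure(struct net_device *dev)
{
	rtnl_lock();
	/* private state consulted by ndo_fix_features() has changed;
	 * re-run feature negotiation, notifying userspace only if the
	 * visible feature word actually moved
	 */
	netdev_update_features(dev);
	rtnl_unlock();
}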
dev->features &= ~NETIF_F_GSO;
}
+ /* Turn on no-cache copy when the hardware does the checksumming */
+ dev->hw_features |= NETIF_F_NOCACHE_COPY;
+ if ((dev->features & NETIF_F_ALL_CSUM) &&
+ !(dev->features & NETIF_F_NO_CSUM)) {
+ dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+ dev->features |= NETIF_F_NOCACHE_COPY;
+ }
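/* NETIF_F_NOCACHE_COPY permits cache-bypassing copies of user data on
 * transmit.  It is only a win when the hardware checksums: a software
 * checksum would immediately read the freshly written data back through
 * the cache.
 */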
+
/* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
* vlan_dev_init() will do the dev->features check, so these features
* are enabled only if supported by underlying device.
goto err_uninit;
dev->reg_state = NETREG_REGISTERED;
- netdev_update_features(dev);
+ __netdev_update_features(dev);
/*
* Default initial state at registry is that the
}
}
+ /* If one device can't do no-cache copy, disable it for all */
+ if (!(one & NETIF_F_NOCACHE_COPY))
+ all &= ~NETIF_F_NOCACHE_COPY;
+
one |= NETIF_F_ALL_CSUM;
one |= all & NETIF_F_ONE_FOR_ALL;
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+ sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
- !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
struct rtable *rt;
const struct inet_request_sock *ireq = inet_rsk(req);
struct ip_options *opt = inet_rsk(req)->opt;
- struct flowi4 fl4 = {
- .flowi4_oif = sk->sk_bound_dev_if,
- .flowi4_mark = sk->sk_mark,
- .daddr = ((opt && opt->srr) ?
- opt->faddr : ireq->rmt_addr),
- .saddr = ireq->loc_addr,
- .flowi4_tos = RT_CONN_FLAGS(sk),
- .flowi4_proto = sk->sk_protocol,
- .flowi4_flags = inet_sk_flowi_flags(sk),
- .fl4_sport = inet_sk(sk)->inet_sport,
- .fl4_dport = ireq->rmt_port,
- };
struct net *net = sock_net(sk);
+ struct flowi4 fl4;
+ flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+ sk->sk_protocol, inet_sk_flowi_flags(sk),
+ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+ ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
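/* flowi4_init_output() (include/net/flow.h) packs the former
 * designated-initializer block into one call; roughly:
 *
 *	fl4->flowi4_oif  = oif;     fl4->flowi4_mark  = mark;
 *	fl4->flowi4_tos  = tos;     fl4->flowi4_scope = scope;
 *	fl4->flowi4_proto = proto;  fl4->flowi4_flags = flags;
 *	fl4->daddr = daddr;         fl4->saddr = saddr;
 *	fl4->fl4_dport = dport;     fl4->fl4_sport = sport;
 *
 * Note it also makes the previously implicit RT_SCOPE_UNIVERSE (zero)
 * scope explicit.
 */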
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt))